  1. """Adds docstrings to Tensor functions"""
  2. import torch._C
  3. from torch._C import _add_docstr as add_docstr
  4. from ._torch_docs import parse_kwargs, reproducibility_notes
  5. def add_docstr_all(method, docstr):
  6. add_docstr(getattr(torch._C._TensorBase, method), docstr)
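
# A minimal sketch (illustrative comment, not upstream code) of what the helper
# above accomplishes: after a call such as
#
#     add_docstr_all("abs", "abs() -> Tensor\n\nSee :func:`torch.abs`")
#
# the C-implemented method exposes that text, e.g.
# ``torch._C._TensorBase.abs.__doc__`` then contains "See :func:`torch.abs`".
# ``_add_docstr`` rejects a method that already carries a docstring, so each
# method below is documented exactly once.
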
common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
"""
)

add_docstr_all(
    "new_tensor",
    """
new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

.. warning::

    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
    If you have a numpy array and want to avoid a copy, use
    :func:`torch.from_numpy`.

.. warning::

    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
    The equivalents using ``clone()`` and ``detach()`` are recommended.

Args:
    data (array_like): The returned Tensor copies :attr:`data`.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)
""".format(
        **new_common_args
    ),
)

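# Hedged sketch (added for illustration; defined but never called): the warning
# above in practice. ``new_tensor`` always copies, detaches, and casts to the
# calling tensor's dtype.
def _sketch_new_tensor():
    base = torch.ones((2,), dtype=torch.int8)
    src = torch.tensor([4.0, 5.0])
    out = base.new_tensor(src)  # PyTorch warns here, suggesting src.clone().detach()
    assert out.dtype == torch.int8  # dtype comes from `base`, not `src`
    assert not out.requires_grad  # the result is a detached leaf
    out[0] = 9
    assert src[0] == 4.0  # the copy never aliases `src`
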
add_docstr_all(
    "new_full",
    """
new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    fill_value (scalar): the number to fill the output tensor with.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)
""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_empty",
    """
new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_empty_strided",
    """
new_empty_strided(size, stride, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    stride (int...): the strides of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_ones",
    """
new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)
""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_zeros",
    """
new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)
""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "abs",
    r"""
abs() -> Tensor

See :func:`torch.abs`
""",
)

add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)

add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)

add_docstr_all(
    "absolute_",
    r"""
absolute_() -> Tensor

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""",
)

add_docstr_all(
    "acos",
    r"""
acos() -> Tensor

See :func:`torch.acos`
""",
)

add_docstr_all(
    "acos_",
    r"""
acos_() -> Tensor

In-place version of :meth:`~Tensor.acos`
""",
)

add_docstr_all(
    "arccos",
    r"""
arccos() -> Tensor

See :func:`torch.arccos`
""",
)

add_docstr_all(
    "arccos_",
    r"""
arccos_() -> Tensor

In-place version of :meth:`~Tensor.arccos`
""",
)

add_docstr_all(
    "acosh",
    r"""
acosh() -> Tensor

See :func:`torch.acosh`
""",
)

add_docstr_all(
    "acosh_",
    r"""
acosh_() -> Tensor

In-place version of :meth:`~Tensor.acosh`
""",
)

add_docstr_all(
    "arccosh",
    r"""
arccosh() -> Tensor

See :func:`torch.arccosh`
""",
)

add_docstr_all(
    "arccosh_",
    r"""
arccosh_() -> Tensor

In-place version of :meth:`~Tensor.arccosh`
""",
)

add_docstr_all(
    "add",
    r"""
add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

See :func:`torch.add`
""",
)

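# Hedged sketch (illustrative, never called): ``alpha`` scales ``other`` before
# the addition, and a tensor ``other`` broadcasts against ``self``.
def _sketch_add():
    t = torch.tensor([1.0, 2.0])
    out = t.add(torch.tensor([10.0, 20.0]), alpha=2)  # computes t + 2 * other
    assert torch.equal(out, torch.tensor([21.0, 42.0]))
    assert t.add(torch.ones(2, 2)).shape == (2, 2)  # (2,) broadcasts to (2, 2)
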
add_docstr_all(
    "add_",
    r"""
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`
""",
)

add_docstr_all(
    "addbmm",
    r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`
""",
)

add_docstr_all(
    "addbmm_",
    r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`
""",
)

add_docstr_all(
    "addcdiv",
    r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`
""",
)

add_docstr_all(
    "addcdiv_",
    r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`
""",
)

add_docstr_all(
    "addcmul",
    r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`
""",
)

add_docstr_all(
    "addcmul_",
    r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`
""",
)

add_docstr_all(
    "addmm",
    r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`
""",
)

add_docstr_all(
    "addmm_",
    r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`
""",
)

add_docstr_all(
    "addmv",
    r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`
""",
)

add_docstr_all(
    "addmv_",
    r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmv`
""",
)

add_docstr_all(
    "sspaddmm",
    r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.sspaddmm`
""",
)

add_docstr_all(
    "smm",
    r"""
smm(mat) -> Tensor

See :func:`torch.smm`
""",
)

add_docstr_all(
    "addr",
    r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addr`
""",
)

add_docstr_all(
    "addr_",
    r"""
addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addr`
""",
)

add_docstr_all(
    "align_as",
    r"""
align_as(other) -> Tensor

Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.

This operation is useful for explicit broadcasting by names (see examples).

All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.

All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.

To align a tensor to a specific order, use :meth:`~Tensor.align_to`.

Examples::

    # Example 1: Applying a mask
    >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
    >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
    >>> imgs.masked_fill_(mask.align_as(imgs), 0)

    # Example 2: Applying a per-channel-scale
    >>> def scale_channels(input, scale):
    ...     scale = scale.refine_names('C')
    ...     return input * scale.align_as(input)

    >>> num_channels = 3
    >>> scale = torch.randn(num_channels, names=('C',))
    >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
    >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
    >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))

    # scale_channels is agnostic to the dimension order of the input
    >>> scale_channels(imgs, scale)
    >>> scale_channels(more_imgs, scale)
    >>> scale_channels(videos, scale)

.. warning::
    The named tensor API is experimental and subject to change.
""",
)

add_docstr_all(
    "all",
    r"""
all(dim=None, keepdim=False) -> Tensor

See :func:`torch.all`
""",
)

add_docstr_all(
    "allclose",
    r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

See :func:`torch.allclose`
""",
)

add_docstr_all(
    "angle",
    r"""
angle() -> Tensor

See :func:`torch.angle`
""",
)

add_docstr_all(
    "any",
    r"""
any(dim=None, keepdim=False) -> Tensor

See :func:`torch.any`
""",
)

add_docstr_all(
    "apply_",
    r"""
apply_(callable) -> Tensor

Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.

.. note::

    This function only works with CPU tensors and should not be used in code
    sections that require high performance.
""",
)

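# Hedged sketch (illustrative, never called): ``apply_`` funnels every element
# through a Python callable, in place; CPU-only and slow, per the note above.
def _sketch_apply_():
    t = torch.tensor([1.0, 2.0, 3.0])
    t.apply_(lambda x: x * x)  # mutates `t` and returns it
    assert torch.equal(t, torch.tensor([1.0, 4.0, 9.0]))
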
add_docstr_all(
    "asin",
    r"""
asin() -> Tensor

See :func:`torch.asin`
""",
)

add_docstr_all(
    "asin_",
    r"""
asin_() -> Tensor

In-place version of :meth:`~Tensor.asin`
""",
)

add_docstr_all(
    "arcsin",
    r"""
arcsin() -> Tensor

See :func:`torch.arcsin`
""",
)

add_docstr_all(
    "arcsin_",
    r"""
arcsin_() -> Tensor

In-place version of :meth:`~Tensor.arcsin`
""",
)

add_docstr_all(
    "asinh",
    r"""
asinh() -> Tensor

See :func:`torch.asinh`
""",
)

add_docstr_all(
    "asinh_",
    r"""
asinh_() -> Tensor

In-place version of :meth:`~Tensor.asinh`
""",
)

add_docstr_all(
    "arcsinh",
    r"""
arcsinh() -> Tensor

See :func:`torch.arcsinh`
""",
)

add_docstr_all(
    "arcsinh_",
    r"""
arcsinh_() -> Tensor

In-place version of :meth:`~Tensor.arcsinh`
""",
)

add_docstr_all(
    "as_strided",
    r"""
as_strided(size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided`
""",
)

add_docstr_all(
    "atan",
    r"""
atan() -> Tensor

See :func:`torch.atan`
""",
)

add_docstr_all(
    "atan_",
    r"""
atan_() -> Tensor

In-place version of :meth:`~Tensor.atan`
""",
)

add_docstr_all(
    "arctan",
    r"""
arctan() -> Tensor

See :func:`torch.arctan`
""",
)

add_docstr_all(
    "arctan_",
    r"""
arctan_() -> Tensor

In-place version of :meth:`~Tensor.arctan`
""",
)

add_docstr_all(
    "atan2",
    r"""
atan2(other) -> Tensor

See :func:`torch.atan2`
""",
)

add_docstr_all(
    "atan2_",
    r"""
atan2_(other) -> Tensor

In-place version of :meth:`~Tensor.atan2`
""",
)

add_docstr_all(
    "arctan2",
    r"""
arctan2(other) -> Tensor

See :func:`torch.arctan2`
""",
)

add_docstr_all(
    "arctan2_",
    r"""
arctan2_(other) -> Tensor

In-place version of :meth:`~Tensor.arctan2`
""",
)

add_docstr_all(
    "atanh",
    r"""
atanh() -> Tensor

See :func:`torch.atanh`
""",
)

add_docstr_all(
    "atanh_",
    r"""
atanh_() -> Tensor

In-place version of :meth:`~Tensor.atanh`
""",
)

add_docstr_all(
    "arctanh",
    r"""
arctanh() -> Tensor

See :func:`torch.arctanh`
""",
)

add_docstr_all(
    "arctanh_",
    r"""
arctanh_() -> Tensor

In-place version of :meth:`~Tensor.arctanh`
""",
)

add_docstr_all(
    "baddbmm",
    r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.baddbmm`
""",
)

add_docstr_all(
    "baddbmm_",
    r"""
baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.baddbmm`
""",
)

add_docstr_all(
    "bernoulli",
    r"""
bernoulli(*, generator=None) -> Tensor

Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.

See :func:`torch.bernoulli`
""",
)

add_docstr_all(
    "bernoulli_",
    r"""
bernoulli_(p=0.5, *, generator=None) -> Tensor

Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.

:attr:`p` should either be a scalar or tensor containing probabilities to be
used for drawing the binary random number.

If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.

See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
""",
)

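# Hedged sketch (illustrative, never called): ``p`` may be a Python scalar or a
# floating-point tensor of per-element probabilities, while ``self`` may be
# integral.
def _sketch_bernoulli_():
    t = torch.empty(3, dtype=torch.int64)
    t.bernoulli_(0.5)  # one scalar probability for every element
    t.bernoulli_(torch.tensor([0.0, 0.5, 1.0]))  # per-element probabilities
    assert t[0] == 0 and t[2] == 1  # p=0 and p=1 are deterministic
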
add_docstr_all(
    "bincount",
    r"""
bincount(weights=None, minlength=0) -> Tensor

See :func:`torch.bincount`
""",
)

add_docstr_all(
    "bitwise_not",
    r"""
bitwise_not() -> Tensor

See :func:`torch.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_not_",
    r"""
bitwise_not_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_and",
    r"""
bitwise_and(other) -> Tensor

See :func:`torch.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_and_",
    r"""
bitwise_and_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_or",
    r"""
bitwise_or(other) -> Tensor

See :func:`torch.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_or_",
    r"""
bitwise_or_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_xor",
    r"""
bitwise_xor(other) -> Tensor

See :func:`torch.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_xor_",
    r"""
bitwise_xor_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_left_shift",
    r"""
bitwise_left_shift(other) -> Tensor

See :func:`torch.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_left_shift_",
    r"""
bitwise_left_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift",
    r"""
bitwise_right_shift(other) -> Tensor

See :func:`torch.bitwise_right_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift_",
    r"""
bitwise_right_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_right_shift`
""",
)

add_docstr_all(
    "broadcast_to",
    r"""
broadcast_to(shape) -> Tensor

See :func:`torch.broadcast_to`.
""",
)

add_docstr_all(
    "logical_and",
    r"""
logical_and(other) -> Tensor

See :func:`torch.logical_and`
""",
)

add_docstr_all(
    "logical_and_",
    r"""
logical_and_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_and`
""",
)

add_docstr_all(
    "logical_not",
    r"""
logical_not() -> Tensor

See :func:`torch.logical_not`
""",
)

add_docstr_all(
    "logical_not_",
    r"""
logical_not_() -> Tensor

In-place version of :meth:`~Tensor.logical_not`
""",
)

add_docstr_all(
    "logical_or",
    r"""
logical_or(other) -> Tensor

See :func:`torch.logical_or`
""",
)

add_docstr_all(
    "logical_or_",
    r"""
logical_or_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_or`
""",
)

add_docstr_all(
    "logical_xor",
    r"""
logical_xor(other) -> Tensor

See :func:`torch.logical_xor`
""",
)

add_docstr_all(
    "logical_xor_",
    r"""
logical_xor_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_xor`
""",
)

add_docstr_all(
    "bmm",
    r"""
bmm(batch2) -> Tensor

See :func:`torch.bmm`
""",
)

add_docstr_all(
    "cauchy_",
    r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor

Fills the tensor with numbers drawn from the Cauchy distribution:

.. math::

    f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
""",
)

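# Hedged sketch (illustrative, never called): draws use the location
# (``median``) / scale (``sigma``) parameterization of the density above.
def _sketch_cauchy_():
    t = torch.empty(5).cauchy_(median=0.0, sigma=1.0)
    assert t.shape == (5,)  # heavy-tailed samples; the mean does not exist
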
add_docstr_all(
    "ceil",
    r"""
ceil() -> Tensor

See :func:`torch.ceil`
""",
)

add_docstr_all(
    "ceil_",
    r"""
ceil_() -> Tensor

In-place version of :meth:`~Tensor.ceil`
""",
)

add_docstr_all(
    "cholesky",
    r"""
cholesky(upper=False) -> Tensor

See :func:`torch.cholesky`
""",
)

add_docstr_all(
    "cholesky_solve",
    r"""
cholesky_solve(input2, upper=False) -> Tensor

See :func:`torch.cholesky_solve`
""",
)

add_docstr_all(
    "cholesky_inverse",
    r"""
cholesky_inverse(upper=False) -> Tensor

See :func:`torch.cholesky_inverse`
""",
)

add_docstr_all(
    "clamp",
    r"""
clamp(min=None, max=None) -> Tensor

See :func:`torch.clamp`
""",
)

add_docstr_all(
    "clamp_",
    r"""
clamp_(min=None, max=None) -> Tensor

In-place version of :meth:`~Tensor.clamp`
""",
)

add_docstr_all(
    "clip",
    r"""
clip(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp`.
""",
)

add_docstr_all(
    "clip_",
    r"""
clip_(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp_`.
""",
)

add_docstr_all(
    "clone",
    r"""
clone(*, memory_format=torch.preserve_format) -> Tensor

See :func:`torch.clone`
""".format(
        **common_args
    ),
)

add_docstr_all(
    "coalesce",
    r"""
coalesce() -> Tensor

Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.

Returns :attr:`self` if :attr:`self` is a coalesced tensor.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.
""",
)

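# Hedged sketch (illustrative, never called): coalescing sums values that share
# an index in an uncoalesced COO tensor.
def _sketch_coalesce():
    i = torch.tensor([[0, 0], [1, 1]])  # the entry (0, 1) appears twice
    v = torch.tensor([2.0, 3.0])
    s = torch.sparse_coo_tensor(i, v, (2, 2))
    assert not s.is_coalesced()
    assert s.coalesce().values().tolist() == [5.0]  # 2.0 + 3.0 merged
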
add_docstr_all(
    "contiguous",
    r"""
contiguous(memory_format=torch.contiguous_format) -> Tensor

Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
:attr:`self` tensor is already in the specified memory format, this function returns the
:attr:`self` tensor.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
""",
)

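# Hedged sketch (illustrative, never called): a transpose is a non-contiguous
# view; ``contiguous()`` materializes a compact copy, while an already
# contiguous tensor comes back unchanged.
def _sketch_contiguous():
    t = torch.arange(6.0).reshape(2, 3)
    assert t.contiguous() is t  # no copy when the layout already matches
    tt = t.t()
    assert not tt.is_contiguous()
    assert tt.contiguous().is_contiguous()
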
add_docstr_all(
    "copy_",
    r"""
copy_(src, non_blocking=False) -> Tensor

Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.

The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.

Args:
    src (Tensor): the source tensor to copy from
    non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
        the copy may occur asynchronously with respect to the host. For other
        cases, this argument has no effect.
""",
)

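# Hedged sketch (illustrative, never called): ``copy_`` broadcasts ``src`` into
# ``self`` and converts across dtypes.
def _sketch_copy_():
    dst = torch.empty(2, 3, dtype=torch.int64)
    dst.copy_(torch.tensor([0.9, 1.9, 2.9]))  # (3,) broadcasts to (2, 3); cast truncates
    assert dst.tolist() == [[0, 1, 2], [0, 1, 2]]
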
add_docstr_all(
    "conj",
    r"""
conj() -> Tensor

See :func:`torch.conj`
""",
)

add_docstr_all(
    "conj_physical",
    r"""
conj_physical() -> Tensor

See :func:`torch.conj_physical`
""",
)

add_docstr_all(
    "conj_physical_",
    r"""
conj_physical_() -> Tensor

In-place version of :meth:`~Tensor.conj_physical`
""",
)

add_docstr_all(
    "resolve_conj",
    r"""
resolve_conj() -> Tensor

See :func:`torch.resolve_conj`
""",
)

add_docstr_all(
    "resolve_neg",
    r"""
resolve_neg() -> Tensor

See :func:`torch.resolve_neg`
""",
)

add_docstr_all(
    "copysign",
    r"""
copysign(other) -> Tensor

See :func:`torch.copysign`
""",
)

add_docstr_all(
    "copysign_",
    r"""
copysign_(other) -> Tensor

In-place version of :meth:`~Tensor.copysign`
""",
)

add_docstr_all(
    "cos",
    r"""
cos() -> Tensor

See :func:`torch.cos`
""",
)

add_docstr_all(
    "cos_",
    r"""
cos_() -> Tensor

In-place version of :meth:`~Tensor.cos`
""",
)

add_docstr_all(
    "cosh",
    r"""
cosh() -> Tensor

See :func:`torch.cosh`
""",
)

add_docstr_all(
    "cosh_",
    r"""
cosh_() -> Tensor

In-place version of :meth:`~Tensor.cosh`
""",
)

add_docstr_all(
    "cpu",
    r"""
cpu(memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CPU memory.

If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    {memory_format}
""".format(
        **common_args
    ),
)

add_docstr_all(
    "count_nonzero",
    r"""
count_nonzero(dim=None) -> Tensor

See :func:`torch.count_nonzero`
""",
)

add_docstr_all(
    "cov",
    r"""
cov(*, correction=1, fweights=None, aweights=None) -> Tensor

See :func:`torch.cov`
""",
)

add_docstr_all(
    "corrcoef",
    r"""
corrcoef() -> Tensor

See :func:`torch.corrcoef`
""",
)

add_docstr_all(
    "cross",
    r"""
cross(other, dim=None) -> Tensor

See :func:`torch.cross`
""",
)

add_docstr_all(
    "cuda",
    r"""
cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CUDA memory.

If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination GPU device.
        Defaults to the current CUDA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(
        **common_args
    ),
)

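# Hedged sketch (illustrative, never called; assumes a CUDA-enabled build):
# moving a tensor that already lives on the target device is a no-op that
# returns the same object.
def _sketch_cuda():
    if torch.cuda.is_available():
        t = torch.ones(2).cuda()
        assert t.cuda() is t  # already on the current CUDA device
        assert t.device.type == "cuda"
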
add_docstr_all(
    "ipu",
    r"""
ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in IPU memory.

If this object is already in IPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination IPU device.
        Defaults to the current IPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(
        **common_args
    ),
)

add_docstr_all(
    "xpu",
    r"""
xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in XPU memory.

If this object is already in XPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination XPU device.
        Defaults to the current XPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(
        **common_args
    ),
)

  1125. add_docstr_all(
  1126. "logcumsumexp",
  1127. r"""
  1128. logcumsumexp(dim) -> Tensor
  1129. See :func:`torch.logcumsumexp`
  1130. """,
  1131. )
  1132. add_docstr_all(
  1133. "cummax",
  1134. r"""
  1135. cummax(dim) -> (Tensor, Tensor)
  1136. See :func:`torch.cummax`
  1137. """,
  1138. )
  1139. add_docstr_all(
  1140. "cummin",
  1141. r"""
  1142. cummin(dim) -> (Tensor, Tensor)
  1143. See :func:`torch.cummin`
  1144. """,
  1145. )
  1146. add_docstr_all(
  1147. "cumprod",
  1148. r"""
  1149. cumprod(dim, dtype=None) -> Tensor
  1150. See :func:`torch.cumprod`
  1151. """,
  1152. )
  1153. add_docstr_all(
  1154. "cumprod_",
  1155. r"""
  1156. cumprod_(dim, dtype=None) -> Tensor
  1157. In-place version of :meth:`~Tensor.cumprod`
  1158. """,
  1159. )
  1160. add_docstr_all(
  1161. "cumsum",
  1162. r"""
  1163. cumsum(dim, dtype=None) -> Tensor
  1164. See :func:`torch.cumsum`
  1165. """,
  1166. )
  1167. add_docstr_all(
  1168. "cumsum_",
  1169. r"""
  1170. cumsum_(dim, dtype=None) -> Tensor
  1171. In-place version of :meth:`~Tensor.cumsum`
  1172. """,
  1173. )
  1174. add_docstr_all(
  1175. "data_ptr",
  1176. r"""
  1177. data_ptr() -> int
  1178. Returns the address of the first element of :attr:`self` tensor.
  1179. """,
  1180. )
  1181. add_docstr_all(
  1182. "dequantize",
  1183. r"""
  1184. dequantize() -> Tensor
  1185. Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
  1186. """,
  1187. )
  1188. add_docstr_all(
  1189. "dense_dim",
  1190. r"""
  1191. dense_dim() -> int
  1192. Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
  1193. .. note::
  1194. Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.
  1195. See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
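
Example (a small sketch; the values of this hybrid sparse COO tensor carry
one trailing dense dimension)::

    >>> i = torch.tensor([[0, 1]])
    >>> v = torch.zeros(2, 3)
    >>> t = torch.sparse_coo_tensor(i, v, (4, 3))
    >>> t.dense_dim()
    1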
  1196. """,
  1197. )
  1198. add_docstr_all(
  1199. "diag",
  1200. r"""
  1201. diag(diagonal=0) -> Tensor
  1202. See :func:`torch.diag`
  1203. """,
  1204. )
  1205. add_docstr_all(
  1206. "diag_embed",
  1207. r"""
  1208. diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
  1209. See :func:`torch.diag_embed`
  1210. """,
  1211. )
  1212. add_docstr_all(
  1213. "diagflat",
  1214. r"""
  1215. diagflat(offset=0) -> Tensor
  1216. See :func:`torch.diagflat`
  1217. """,
  1218. )
  1219. add_docstr_all(
  1220. "diagonal",
  1221. r"""
  1222. diagonal(offset=0, dim1=0, dim2=1) -> Tensor
  1223. See :func:`torch.diagonal`
  1224. """,
  1225. )
  1226. add_docstr_all(
  1227. "diagonal_scatter",
  1228. r"""
  1229. diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
  1230. See :func:`torch.diagonal_scatter`
  1231. """,
  1232. )
  1233. add_docstr_all(
  1234. "as_strided_scatter",
  1235. r"""
  1236. as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor
  1237. See :func:`torch.as_strided_scatter`
  1238. """,
  1239. )
  1240. add_docstr_all(
  1241. "fill_diagonal_",
  1242. r"""
  1243. fill_diagonal_(fill_value, wrap=False) -> Tensor
  1244. Fill the main diagonal of a tensor that has at least 2-dimensions.
  1245. When dims>2, all dimensions of input must be of equal length.
  1246. This function modifies the input tensor in-place, and returns the input tensor.
  1247. Arguments:
  1248. fill_value (Scalar): the fill value
  1249. wrap (bool): the diagonal 'wrapped' after N columns for tall matrices.
  1250. Example::
  1251. >>> a = torch.zeros(3, 3)
  1252. >>> a.fill_diagonal_(5)
  1253. tensor([[5., 0., 0.],
  1254. [0., 5., 0.],
  1255. [0., 0., 5.]])
  1256. >>> b = torch.zeros(7, 3)
  1257. >>> b.fill_diagonal_(5)
  1258. tensor([[5., 0., 0.],
  1259. [0., 5., 0.],
  1260. [0., 0., 5.],
  1261. [0., 0., 0.],
  1262. [0., 0., 0.],
  1263. [0., 0., 0.],
  1264. [0., 0., 0.]])
  1265. >>> c = torch.zeros(7, 3)
  1266. >>> c.fill_diagonal_(5, wrap=True)
  1267. tensor([[5., 0., 0.],
  1268. [0., 5., 0.],
  1269. [0., 0., 5.],
  1270. [0., 0., 0.],
  1271. [5., 0., 0.],
  1272. [0., 5., 0.],
  1273. [0., 0., 5.]])
  1274. """,
  1275. )
  1276. add_docstr_all(
  1277. "floor_divide",
  1278. r"""
  1279. floor_divide(value) -> Tensor
  1280. See :func:`torch.floor_divide`
  1281. """,
  1282. )
  1283. add_docstr_all(
  1284. "floor_divide_",
  1285. r"""
  1286. floor_divide_(value) -> Tensor
  1287. In-place version of :meth:`~Tensor.floor_divide`
  1288. """,
  1289. )
  1290. add_docstr_all(
  1291. "diff",
  1292. r"""
  1293. diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
  1294. See :func:`torch.diff`
  1295. """,
  1296. )
  1297. add_docstr_all(
  1298. "digamma",
  1299. r"""
  1300. digamma() -> Tensor
  1301. See :func:`torch.digamma`
  1302. """,
  1303. )
  1304. add_docstr_all(
  1305. "digamma_",
  1306. r"""
  1307. digamma_() -> Tensor
  1308. In-place version of :meth:`~Tensor.digamma`
  1309. """,
  1310. )
  1311. add_docstr_all(
  1312. "dim",
  1313. r"""
  1314. dim() -> int
  1315. Returns the number of dimensions of :attr:`self` tensor.
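
Example (a quick illustration)::

    >>> torch.zeros(2, 3, 4).dim()
    3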
  1316. """,
  1317. )
  1318. add_docstr_all(
  1319. "dist",
  1320. r"""
  1321. dist(other, p=2) -> Tensor
  1322. See :func:`torch.dist`
  1323. """,
  1324. )
  1325. add_docstr_all(
  1326. "div",
  1327. r"""
  1328. div(value, *, rounding_mode=None) -> Tensor
  1329. See :func:`torch.div`
  1330. """,
  1331. )
  1332. add_docstr_all(
  1333. "div_",
  1334. r"""
  1335. div_(value, *, rounding_mode=None) -> Tensor
  1336. In-place version of :meth:`~Tensor.div`
  1337. """,
  1338. )
  1339. add_docstr_all(
  1340. "divide",
  1341. r"""
  1342. divide(value, *, rounding_mode=None) -> Tensor
  1343. See :func:`torch.divide`
  1344. """,
  1345. )
  1346. add_docstr_all(
  1347. "divide_",
  1348. r"""
  1349. divide_(value, *, rounding_mode=None) -> Tensor
  1350. In-place version of :meth:`~Tensor.divide`
  1351. """,
  1352. )
  1353. add_docstr_all(
  1354. "dot",
  1355. r"""
  1356. dot(other) -> Tensor
  1357. See :func:`torch.dot`
  1358. """,
  1359. )
  1360. add_docstr_all(
  1361. "element_size",
  1362. r"""
  1363. element_size() -> int
  1364. Returns the size in bytes of an individual element.
  1365. Example::
  1366. >>> torch.tensor([]).element_size()
  1367. 4
  1368. >>> torch.tensor([], dtype=torch.uint8).element_size()
  1369. 1
  1370. """,
  1371. )
  1372. add_docstr_all(
  1373. "eq",
  1374. r"""
  1375. eq(other) -> Tensor
  1376. See :func:`torch.eq`
  1377. """,
  1378. )
  1379. add_docstr_all(
  1380. "eq_",
  1381. r"""
  1382. eq_(other) -> Tensor
  1383. In-place version of :meth:`~Tensor.eq`
  1384. """,
  1385. )
  1386. add_docstr_all(
  1387. "equal",
  1388. r"""
  1389. equal(other) -> bool
  1390. See :func:`torch.equal`
  1391. """,
  1392. )
  1393. add_docstr_all(
  1394. "erf",
  1395. r"""
  1396. erf() -> Tensor
  1397. See :func:`torch.erf`
  1398. """,
  1399. )
  1400. add_docstr_all(
  1401. "erf_",
  1402. r"""
  1403. erf_() -> Tensor
  1404. In-place version of :meth:`~Tensor.erf`
  1405. """,
  1406. )
  1407. add_docstr_all(
  1408. "erfc",
  1409. r"""
  1410. erfc() -> Tensor
  1411. See :func:`torch.erfc`
  1412. """,
  1413. )
  1414. add_docstr_all(
  1415. "erfc_",
  1416. r"""
  1417. erfc_() -> Tensor
  1418. In-place version of :meth:`~Tensor.erfc`
  1419. """,
  1420. )
  1421. add_docstr_all(
  1422. "erfinv",
  1423. r"""
  1424. erfinv() -> Tensor
  1425. See :func:`torch.erfinv`
  1426. """,
  1427. )
  1428. add_docstr_all(
  1429. "erfinv_",
  1430. r"""
  1431. erfinv_() -> Tensor
  1432. In-place version of :meth:`~Tensor.erfinv`
  1433. """,
  1434. )
  1435. add_docstr_all(
  1436. "exp",
  1437. r"""
  1438. exp() -> Tensor
  1439. See :func:`torch.exp`
  1440. """,
  1441. )
  1442. add_docstr_all(
  1443. "exp_",
  1444. r"""
  1445. exp_() -> Tensor
  1446. In-place version of :meth:`~Tensor.exp`
  1447. """,
  1448. )
  1449. add_docstr_all(
  1450. "exp2",
  1451. r"""
  1452. exp2() -> Tensor
  1453. See :func:`torch.exp2`
  1454. """,
  1455. )
  1456. add_docstr_all(
  1457. "exp2_",
  1458. r"""
  1459. exp2_() -> Tensor
  1460. In-place version of :meth:`~Tensor.exp2`
  1461. """,
  1462. )
  1463. add_docstr_all(
  1464. "expm1",
  1465. r"""
  1466. expm1() -> Tensor
  1467. See :func:`torch.expm1`
  1468. """,
  1469. )
  1470. add_docstr_all(
  1471. "expm1_",
  1472. r"""
  1473. expm1_() -> Tensor
  1474. In-place version of :meth:`~Tensor.expm1`
  1475. """,
  1476. )
  1477. add_docstr_all(
  1478. "exponential_",
  1479. r"""
  1480. exponential_(lambd=1, *, generator=None) -> Tensor
  1481. Fills :attr:`self` tensor with elements drawn from the exponential distribution:
  1482. .. math::
  1483. f(x) = \lambda e^{-\lambda x}
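
Example (a small sketch; the draws are random, so only a property of the
sample is checked rather than exact values)::

    >>> t = torch.empty(100)
    >>> _ = t.exponential_(lambd=0.5)
    >>> bool((t >= 0).all())
    True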
  1484. """,
  1485. )
  1486. add_docstr_all(
  1487. "fill_",
  1488. r"""
  1489. fill_(value) -> Tensor
  1490. Fills :attr:`self` tensor with the specified value.
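
Example (fills every element in place)::

    >>> torch.empty(2, 2).fill_(3.5)
    tensor([[3.5000, 3.5000],
            [3.5000, 3.5000]])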
  1491. """,
  1492. )
  1493. add_docstr_all(
  1494. "floor",
  1495. r"""
  1496. floor() -> Tensor
  1497. See :func:`torch.floor`
  1498. """,
  1499. )
  1500. add_docstr_all(
  1501. "flip",
  1502. r"""
  1503. flip(dims) -> Tensor
  1504. See :func:`torch.flip`
  1505. """,
  1506. )
  1507. add_docstr_all(
  1508. "fliplr",
  1509. r"""
  1510. fliplr() -> Tensor
  1511. See :func:`torch.fliplr`
  1512. """,
  1513. )
  1514. add_docstr_all(
  1515. "flipud",
  1516. r"""
  1517. flipud() -> Tensor
  1518. See :func:`torch.flipud`
  1519. """,
  1520. )
  1521. add_docstr_all(
  1522. "roll",
  1523. r"""
  1524. roll(shifts, dims) -> Tensor
  1525. See :func:`torch.roll`
  1526. """,
  1527. )
  1528. add_docstr_all(
  1529. "floor_",
  1530. r"""
  1531. floor_() -> Tensor
  1532. In-place version of :meth:`~Tensor.floor`
  1533. """,
  1534. )
  1535. add_docstr_all(
  1536. "fmod",
  1537. r"""
  1538. fmod(divisor) -> Tensor
  1539. See :func:`torch.fmod`
  1540. """,
  1541. )
  1542. add_docstr_all(
  1543. "fmod_",
  1544. r"""
  1545. fmod_(divisor) -> Tensor
  1546. In-place version of :meth:`~Tensor.fmod`
  1547. """,
  1548. )
  1549. add_docstr_all(
  1550. "frac",
  1551. r"""
  1552. frac() -> Tensor
  1553. See :func:`torch.frac`
  1554. """,
  1555. )
  1556. add_docstr_all(
  1557. "frac_",
  1558. r"""
  1559. frac_() -> Tensor
  1560. In-place version of :meth:`~Tensor.frac`
  1561. """,
  1562. )
  1563. add_docstr_all(
  1564. "frexp",
  1565. r"""
  1566. frexp(input) -> (Tensor mantissa, Tensor exponent)
  1567. See :func:`torch.frexp`
  1568. """,
  1569. )
  1570. add_docstr_all(
  1571. "flatten",
  1572. r"""
  1573. flatten(start_dim=0, end_dim=-1) -> Tensor
  1574. See :func:`torch.flatten`
  1575. """,
  1576. )
  1577. add_docstr_all(
  1578. "gather",
  1579. r"""
  1580. gather(dim, index) -> Tensor
  1581. See :func:`torch.gather`
  1582. """,
  1583. )
  1584. add_docstr_all(
  1585. "gcd",
  1586. r"""
  1587. gcd(other) -> Tensor
  1588. See :func:`torch.gcd`
  1589. """,
  1590. )
  1591. add_docstr_all(
  1592. "gcd_",
  1593. r"""
  1594. gcd_(other) -> Tensor
  1595. In-place version of :meth:`~Tensor.gcd`
  1596. """,
  1597. )
  1598. add_docstr_all(
  1599. "ge",
  1600. r"""
  1601. ge(other) -> Tensor
  1602. See :func:`torch.ge`.
  1603. """,
  1604. )
  1605. add_docstr_all(
  1606. "ge_",
  1607. r"""
  1608. ge_(other) -> Tensor
  1609. In-place version of :meth:`~Tensor.ge`.
  1610. """,
  1611. )
  1612. add_docstr_all(
  1613. "greater_equal",
  1614. r"""
  1615. greater_equal(other) -> Tensor
  1616. See :func:`torch.greater_equal`.
  1617. """,
  1618. )
  1619. add_docstr_all(
  1620. "greater_equal_",
  1621. r"""
  1622. greater_equal_(other) -> Tensor
  1623. In-place version of :meth:`~Tensor.greater_equal`.
  1624. """,
  1625. )
  1626. add_docstr_all(
  1627. "geometric_",
  1628. r"""
  1629. geometric_(p, *, generator=None) -> Tensor
  1630. Fills :attr:`self` tensor with elements drawn from the geometric distribution:
  1631. .. math::
  1632. f(X=k) = (1 - p)^{k - 1} p
  1633. """,
  1634. )
  1635. add_docstr_all(
  1636. "geqrf",
  1637. r"""
  1638. geqrf() -> (Tensor, Tensor)
  1639. See :func:`torch.geqrf`
  1640. """,
  1641. )
  1642. add_docstr_all(
  1643. "ger",
  1644. r"""
  1645. ger(vec2) -> Tensor
  1646. See :func:`torch.ger`
  1647. """,
  1648. )
  1649. add_docstr_all(
  1650. "inner",
  1651. r"""
  1652. inner(other) -> Tensor
  1653. See :func:`torch.inner`.
  1654. """,
  1655. )
  1656. add_docstr_all(
  1657. "outer",
  1658. r"""
  1659. outer(vec2) -> Tensor
  1660. See :func:`torch.outer`.
  1661. """,
  1662. )
  1663. add_docstr_all(
  1664. "hypot",
  1665. r"""
  1666. hypot(other) -> Tensor
  1667. See :func:`torch.hypot`
  1668. """,
  1669. )
  1670. add_docstr_all(
  1671. "hypot_",
  1672. r"""
  1673. hypot_(other) -> Tensor
  1674. In-place version of :meth:`~Tensor.hypot`
  1675. """,
  1676. )
  1677. add_docstr_all(
  1678. "i0",
  1679. r"""
  1680. i0() -> Tensor
  1681. See :func:`torch.i0`
  1682. """,
  1683. )
  1684. add_docstr_all(
  1685. "i0_",
  1686. r"""
  1687. i0_() -> Tensor
  1688. In-place version of :meth:`~Tensor.i0`
  1689. """,
  1690. )
  1691. add_docstr_all(
  1692. "igamma",
  1693. r"""
  1694. igamma(other) -> Tensor
  1695. See :func:`torch.igamma`
  1696. """,
  1697. )
  1698. add_docstr_all(
  1699. "igamma_",
  1700. r"""
  1701. igamma_(other) -> Tensor
  1702. In-place version of :meth:`~Tensor.igamma`
  1703. """,
  1704. )
  1705. add_docstr_all(
  1706. "igammac",
  1707. r"""
  1708. igammac(other) -> Tensor
  1709. See :func:`torch.igammac`
  1710. """,
  1711. )
  1712. add_docstr_all(
  1713. "igammac_",
  1714. r"""
  1715. igammac_(other) -> Tensor
  1716. In-place version of :meth:`~Tensor.igammac`
  1717. """,
  1718. )
  1719. add_docstr_all(
  1720. "indices",
  1721. r"""
  1722. indices() -> Tensor
  1723. Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
  1724. .. warning::
  1725. Throws an error if :attr:`self` is not a sparse COO tensor.
  1726. See also :meth:`Tensor.values`.
  1727. .. note::
  1728. This method can only be called on a coalesced sparse tensor. See
  1729. :meth:`Tensor.coalesce` for details.
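
Example (a small sketch on a coalesced sparse COO tensor)::

    >>> s = torch.sparse_coo_tensor(torch.tensor([[0, 2]]),
    ...                             torch.tensor([3.0, 4.0]), (5,)).coalesce()
    >>> s.indices()
    tensor([[0, 2]])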
  1730. """,
  1731. )
  1732. add_docstr_all(
  1733. "get_device",
  1734. r"""
  1735. get_device() -> Device ordinal (Integer)
  1736. For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
  1737. For CPU tensors, this function returns `-1`.
  1738. Example::
  1739. >>> x = torch.randn(3, 4, 5, device='cuda:0')
  1740. >>> x.get_device()
  1741. 0
  1742. >>> x.cpu().get_device()
  1743. -1
  1744. """,
  1745. )
  1746. add_docstr_all(
  1747. "values",
  1748. r"""
  1749. values() -> Tensor
  1750. Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
  1751. .. warning::
  1752. Throws an error if :attr:`self` is not a sparse COO tensor.
  1753. See also :meth:`Tensor.indices`.
  1754. .. note::
  1755. This method can only be called on a coalesced sparse tensor. See
  1756. :meth:`Tensor.coalesce` for details.
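
Example (companion to the :meth:`Tensor.indices` sketch above)::

    >>> s = torch.sparse_coo_tensor(torch.tensor([[0, 2]]),
    ...                             torch.tensor([3.0, 4.0]), (5,)).coalesce()
    >>> s.values()
    tensor([3., 4.])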
  1757. """,
  1758. )
  1759. add_docstr_all(
  1760. "gt",
  1761. r"""
  1762. gt(other) -> Tensor
  1763. See :func:`torch.gt`.
  1764. """,
  1765. )
  1766. add_docstr_all(
  1767. "gt_",
  1768. r"""
  1769. gt_(other) -> Tensor
  1770. In-place version of :meth:`~Tensor.gt`.
  1771. """,
  1772. )
  1773. add_docstr_all(
  1774. "greater",
  1775. r"""
  1776. greater(other) -> Tensor
  1777. See :func:`torch.greater`.
  1778. """,
  1779. )
  1780. add_docstr_all(
  1781. "greater_",
  1782. r"""
  1783. greater_(other) -> Tensor
  1784. In-place version of :meth:`~Tensor.greater`.
  1785. """,
  1786. )
  1787. add_docstr_all(
  1788. "has_names",
  1789. r"""
  1790. Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
  1791. """,
  1792. )
  1793. add_docstr_all(
  1794. "hardshrink",
  1795. r"""
  1796. hardshrink(lambd=0.5) -> Tensor
  1797. See :func:`torch.nn.functional.hardshrink`
  1798. """,
  1799. )
  1800. add_docstr_all(
  1801. "heaviside",
  1802. r"""
  1803. heaviside(values) -> Tensor
  1804. See :func:`torch.heaviside`
  1805. """,
  1806. )
  1807. add_docstr_all(
  1808. "heaviside_",
  1809. r"""
  1810. heaviside_(values) -> Tensor
  1811. In-place version of :meth:`~Tensor.heaviside`
  1812. """,
  1813. )
  1814. add_docstr_all(
  1815. "histc",
  1816. r"""
  1817. histc(bins=100, min=0, max=0) -> Tensor
  1818. See :func:`torch.histc`
  1819. """,
  1820. )
  1821. add_docstr_all(
  1822. "histogram",
  1823. r"""
  1824. histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
  1825. See :func:`torch.histogram`
  1826. """,
  1827. )
  1828. add_docstr_all(
  1829. "index_add_",
  1830. r"""
  1831. index_add_(dim, index, source, *, alpha=1) -> Tensor
  1832. Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
  1833. tensor by adding to the indices in the order given in :attr:`index`. For example,
  1834. if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
  1835. ``source`` is subtracted from the ``j``\ th row of :attr:`self`.
  1836. The :attr:`dim`\ th dimension of ``source`` must have the same size as the
  1837. length of :attr:`index` (which must be a vector), and all other dimensions must
  1838. match :attr:`self`, or an error will be raised.
  1839. For a 3-D tensor the output is given as::
  1840. self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0
  1841. self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1
  1842. self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2
  1843. Note:
  1844. {forward_reproducibility_note}
  1845. Args:
  1846. dim (int): dimension along which to index
  1847. index (Tensor): indices of ``source`` to select from,
  1848. should have dtype either `torch.int64` or `torch.int32`
  1849. source (Tensor): the tensor containing values to add
  1850. Keyword args:
  1851. alpha (Number): the scalar multiplier for ``source``
  1852. Example::
  1853. >>> x = torch.ones(5, 3)
  1854. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  1855. >>> index = torch.tensor([0, 4, 2])
  1856. >>> x.index_add_(0, index, t)
  1857. tensor([[ 2., 3., 4.],
  1858. [ 1., 1., 1.],
  1859. [ 8., 9., 10.],
  1860. [ 1., 1., 1.],
  1861. [ 5., 6., 7.]])
  1862. >>> x.index_add_(0, index, t, alpha=-1)
  1863. tensor([[ 1., 1., 1.],
  1864. [ 1., 1., 1.],
  1865. [ 1., 1., 1.],
  1866. [ 1., 1., 1.],
  1867. [ 1., 1., 1.]])
  1868. """.format(
  1869. **reproducibility_notes
  1870. ),
  1871. )
  1872. add_docstr_all(
  1873. "index_copy_",
  1874. r"""
  1875. index_copy_(dim, index, tensor) -> Tensor
  1876. Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
  1877. the indices in the order given in :attr:`index`. For example, if ``dim == 0``
  1878. and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
  1879. ``j``\ th row of :attr:`self`.
  1880. The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
  1881. length of :attr:`index` (which must be a vector), and all other dimensions must
  1882. match :attr:`self`, or an error will be raised.
  1883. .. note::
  1884. If :attr:`index` contains duplicate entries, multiple elements from
  1885. :attr:`tensor` will be copied to the same index of :attr:`self`. The result
  1886. is nondeterministic since it depends on which copy occurs last.
  1887. Args:
  1888. dim (int): dimension along which to index
  1889. index (LongTensor): indices of :attr:`tensor` to select from
  1890. tensor (Tensor): the tensor containing values to copy
  1891. Example::
  1892. >>> x = torch.zeros(5, 3)
  1893. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  1894. >>> index = torch.tensor([0, 4, 2])
  1895. >>> x.index_copy_(0, index, t)
  1896. tensor([[ 1., 2., 3.],
  1897. [ 0., 0., 0.],
  1898. [ 7., 8., 9.],
  1899. [ 0., 0., 0.],
  1900. [ 4., 5., 6.]])
  1901. """,
  1902. )
  1903. add_docstr_all(
  1904. "index_fill_",
  1905. r"""
  1906. index_fill_(dim, index, value) -> Tensor
  1907. Fills the elements of the :attr:`self` tensor with value :attr:`value` by
  1908. selecting the indices in the order given in :attr:`index`.
  1909. Args:
  1910. dim (int): dimension along which to index
  1911. index (LongTensor): indices of :attr:`self` tensor to fill in
  1912. value (float): the value to fill with
  1913. Example::
  1914. >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  1915. >>> index = torch.tensor([0, 2])
  1916. >>> x.index_fill_(1, index, -1)
  1917. tensor([[-1., 2., -1.],
  1918. [-1., 5., -1.],
  1919. [-1., 8., -1.]])
  1920. """,
  1921. )
  1922. add_docstr_all(
  1923. "index_put_",
  1924. r"""
  1925. index_put_(indices, values, accumulate=False) -> Tensor
  1926. Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
  1927. the indices specified in :attr:`indices` (which is a tuple of Tensors). The
  1928. expression ``tensor.index_put_(indices, values)`` is equivalent to
  1929. ``tensor[indices] = values``. Returns :attr:`self`.
  1930. If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
  1931. :attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
  1932. contain duplicate elements.
  1933. Args:
  1934. indices (tuple of LongTensor): tensors used to index into `self`.
  1935. values (Tensor): tensor of same dtype as `self`.
  1936. accumulate (bool): whether to accumulate into self
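
Example (a small sketch; each (row, column) pair of index tensors selects
one element to overwrite)::

    >>> x = torch.zeros(2, 3)
    >>> x.index_put_((torch.tensor([0, 1]), torch.tensor([2, 0])),
    ...              torch.tensor([1.0, 2.0]))
    tensor([[0., 0., 1.],
            [2., 0., 0.]])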
  1937. """,
  1938. )
  1939. add_docstr_all(
  1940. "index_put",
  1941. r"""
  1942. index_put(indices, values, accumulate=False) -> Tensor
  1943. Out-place version of :meth:`~Tensor.index_put_`.
  1944. """,
  1945. )
  1946. add_docstr_all(
  1947. "index_reduce_",
  1948. r"""
  1949. index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor
  1950. Accumulate the elements of ``source`` into the :attr:`self`
  1951. tensor by accumulating to the indices in the order given in :attr:`index`
  1952. using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
  1953. ``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
  1954. row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
  1955. :obj:`include_self="True"`, the values in the :attr:`self` tensor are included
  1956. in the reduction, otherwise, rows in the :attr:`self` tensor that are accumulated
  1957. to are treated as if they were filled with the reduction identites.
  1958. The :attr:`dim`\ th dimension of ``source`` must have the same size as the
  1959. length of :attr:`index` (which must be a vector), and all other dimensions must
  1960. match :attr:`self`, or an error will be raised.
  1961. For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
  1962. output is given as::
  1963. self[index[i], :, :] *= src[i, :, :] # if dim == 0
  1964. self[:, index[i], :] *= src[:, i, :] # if dim == 1
  1965. self[:, :, index[i]] *= src[:, :, i] # if dim == 2
  1966. Note:
  1967. {forward_reproducibility_note}
  1968. .. note::
  1969. This function only supports floating point tensors.
  1970. .. warning::
  1971. This function is in beta and may change in the near future.
  1972. Args:
  1973. dim (int): dimension along which to index
  1974. index (Tensor): indices of ``source`` to select from,
  1975. should have dtype either `torch.int64` or `torch.int32`
  1976. source (FloatTensor): the tensor containing values to accumulate
  1977. reduce (str): the reduction operation to apply
  1978. (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
  1979. Keyword args:
  1980. include_self (bool): whether the elements from the ``self`` tensor are
  1981. included in the reduction
  1982. Example::
  1983. >>> x = torch.empty(5, 3).fill_(2)
  1984. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
  1985. >>> index = torch.tensor([0, 4, 2, 0])
  1986. >>> x.index_reduce_(0, index, t, 'prod')
  1987. tensor([[20., 44., 72.],
  1988. [ 2., 2., 2.],
  1989. [14., 16., 18.],
  1990. [ 2., 2., 2.],
  1991. [ 8., 10., 12.]])
  1992. >>> x = torch.empty(5, 3).fill_(2)
  1993. >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
  1994. tensor([[10., 22., 36.],
  1995. [ 2., 2., 2.],
  1996. [ 7., 8., 9.],
  1997. [ 2., 2., 2.],
  1998. [ 4., 5., 6.]])
  1999. """.format(
  2000. **reproducibility_notes
  2001. ),
  2002. )
  2003. add_docstr_all(
  2004. "index_select",
  2005. r"""
  2006. index_select(dim, index) -> Tensor
  2007. See :func:`torch.index_select`
  2008. """,
  2009. )
  2010. add_docstr_all(
  2011. "sparse_mask",
  2012. r"""
  2013. sparse_mask(mask) -> Tensor
  2014. Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
  2015. strided tensor :attr:`self` filtered by the indices of the sparse
  2016. tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
  2017. ignored. :attr:`self` and :attr:`mask` tensors must have the same
  2018. shape.
  2019. .. note::
  2020. The returned sparse tensor might contain duplicate values if :attr:`mask`
  2021. is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
  2022. if such behavior is not desired.
  2023. .. note::
  2024. The returned sparse tensor has the same indices as the sparse tensor
  2025. :attr:`mask`, even when the corresponding values in :attr:`self` are
  2026. zeros.
  2027. Args:
  2028. mask (Tensor): a sparse tensor whose indices are used as a filter
  2029. Example::
  2030. >>> nse = 5
  2031. >>> dims = (5, 5, 2, 2)
  2032. >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
  2033. ... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
  2034. >>> V = torch.randn(nse, dims[2], dims[3])
  2035. >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
  2036. >>> D = torch.randn(dims)
  2037. >>> D.sparse_mask(S)
  2038. tensor(indices=tensor([[0, 0, 0, 2],
  2039. [0, 1, 4, 3]]),
  2040. values=tensor([[[ 1.6550, 0.2397],
  2041. [-0.1611, -0.0779]],
  2042. [[ 0.2326, -1.0558],
  2043. [ 1.4711, 1.9678]],
  2044. [[-0.5138, -0.0411],
  2045. [ 1.9417, 0.5158]],
  2046. [[ 0.0793, 0.0036],
  2047. [-0.2569, -0.1055]]]),
  2048. size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
  2049. """,
  2050. )
  2051. add_docstr_all(
  2052. "inverse",
  2053. r"""
  2054. inverse() -> Tensor
  2055. See :func:`torch.inverse`
  2056. """,
  2057. )
  2058. add_docstr_all(
  2059. "isnan",
  2060. r"""
  2061. isnan() -> Tensor
  2062. See :func:`torch.isnan`
  2063. """,
  2064. )
  2065. add_docstr_all(
  2066. "isinf",
  2067. r"""
  2068. isinf() -> Tensor
  2069. See :func:`torch.isinf`
  2070. """,
  2071. )
  2072. add_docstr_all(
  2073. "isposinf",
  2074. r"""
  2075. isposinf() -> Tensor
  2076. See :func:`torch.isposinf`
  2077. """,
  2078. )
  2079. add_docstr_all(
  2080. "isneginf",
  2081. r"""
  2082. isneginf() -> Tensor
  2083. See :func:`torch.isneginf`
  2084. """,
  2085. )
  2086. add_docstr_all(
  2087. "isfinite",
  2088. r"""
  2089. isfinite() -> Tensor
  2090. See :func:`torch.isfinite`
  2091. """,
  2092. )
  2093. add_docstr_all(
  2094. "isclose",
  2095. r"""
  2096. isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
  2097. See :func:`torch.isclose`
  2098. """,
  2099. )
  2100. add_docstr_all(
  2101. "isreal",
  2102. r"""
  2103. isreal() -> Tensor
  2104. See :func:`torch.isreal`
  2105. """,
  2106. )
  2107. add_docstr_all(
  2108. "is_coalesced",
  2109. r"""
  2110. is_coalesced() -> bool
  2111. Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
  2112. <sparse-coo-docs>` that is coalesced, ``False`` otherwise.
  2113. .. warning::
  2114. Throws an error if :attr:`self` is not a sparse COO tensor.
  2115. See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
  2116. """,
  2117. )
  2118. add_docstr_all(
  2119. "is_contiguous",
  2120. r"""
  2121. is_contiguous(memory_format=torch.contiguous_format) -> bool
  2122. Returns True if :attr:`self` tensor is contiguous in memory in the order specified
  2123. by memory format.
  2124. Args:
  2125. memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
  2126. order. Default: ``torch.contiguous_format``.
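
Example (a quick sketch; transposing produces a non-contiguous view)::

    >>> x = torch.arange(6).reshape(2, 3)
    >>> x.is_contiguous()
    True
    >>> x.t().is_contiguous()
    False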
  2127. """,
  2128. )
  2129. add_docstr_all(
  2130. "is_pinned",
  2131. r"""
  2132. Returns true if this tensor resides in pinned memory.
  2133. """,
  2134. )
  2135. add_docstr_all(
  2136. "is_floating_point",
  2137. r"""
  2138. is_floating_point() -> bool
  2139. Returns True if the data type of :attr:`self` is a floating point data type.
  2140. """,
  2141. )
  2142. add_docstr_all(
  2143. "is_complex",
  2144. r"""
  2145. is_complex() -> bool
  2146. Returns True if the data type of :attr:`self` is a complex data type.
  2147. """,
  2148. )
  2149. add_docstr_all(
  2150. "is_inference",
  2151. r"""
  2152. is_inference() -> bool
  2153. See :func:`torch.is_inference`
  2154. """,
  2155. )
  2156. add_docstr_all(
  2157. "is_conj",
  2158. r"""
  2159. is_conj() -> bool
  2160. Returns True if the conjugate bit of :attr:`self` is set to true.
  2161. """,
  2162. )
  2163. add_docstr_all(
  2164. "is_neg",
  2165. r"""
  2166. is_neg() -> bool
  2167. Returns True if the negative bit of :attr:`self` is set to true.
  2168. """,
  2169. )
  2170. add_docstr_all(
  2171. "is_signed",
  2172. r"""
  2173. is_signed() -> bool
  2174. Returns True if the data type of :attr:`self` is a signed data type.
  2175. """,
  2176. )
  2177. add_docstr_all(
  2178. "is_set_to",
  2179. r"""
  2180. is_set_to(tensor) -> bool
  2181. Returns True if both tensors are pointing to the exact same memory (same
  2182. storage, offset, size and stride).
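
Example (a small sketch; a plain view shares storage, offset, size and
stride, so it is expected to report ``True``)::

    >>> x = torch.empty(2, 3)
    >>> x.is_set_to(x.view(2, 3))
    True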
  2183. """,
  2184. )
  2185. add_docstr_all(
  2186. "item",
  2187. r"""
  2188. item() -> number
  2189. Returns the value of this tensor as a standard Python number. This only works
  2190. for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
  2191. This operation is not differentiable.
  2192. Example::
  2193. >>> x = torch.tensor([1.0])
  2194. >>> x.item()
  2195. 1.0
  2196. """,
  2197. )
  2198. add_docstr_all(
  2199. "kron",
  2200. r"""
  2201. kron(other) -> Tensor
  2202. See :func:`torch.kron`
  2203. """,
  2204. )
  2205. add_docstr_all(
  2206. "kthvalue",
  2207. r"""
  2208. kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
  2209. See :func:`torch.kthvalue`
  2210. """,
  2211. )
  2212. add_docstr_all(
  2213. "ldexp",
  2214. r"""
  2215. ldexp(other) -> Tensor
  2216. See :func:`torch.ldexp`
  2217. """,
  2218. )
  2219. add_docstr_all(
  2220. "ldexp_",
  2221. r"""
  2222. ldexp_(other) -> Tensor
  2223. In-place version of :meth:`~Tensor.ldexp`
  2224. """,
  2225. )
  2226. add_docstr_all(
  2227. "lcm",
  2228. r"""
  2229. lcm(other) -> Tensor
  2230. See :func:`torch.lcm`
  2231. """,
  2232. )
  2233. add_docstr_all(
  2234. "lcm_",
  2235. r"""
  2236. lcm_(other) -> Tensor
  2237. In-place version of :meth:`~Tensor.lcm`
  2238. """,
  2239. )
  2240. add_docstr_all(
  2241. "le",
  2242. r"""
  2243. le(other) -> Tensor
  2244. See :func:`torch.le`.
  2245. """,
  2246. )
  2247. add_docstr_all(
  2248. "le_",
  2249. r"""
  2250. le_(other) -> Tensor
  2251. In-place version of :meth:`~Tensor.le`.
  2252. """,
  2253. )
  2254. add_docstr_all(
  2255. "less_equal",
  2256. r"""
  2257. less_equal(other) -> Tensor
  2258. See :func:`torch.less_equal`.
  2259. """,
  2260. )
  2261. add_docstr_all(
  2262. "less_equal_",
  2263. r"""
  2264. less_equal_(other) -> Tensor
  2265. In-place version of :meth:`~Tensor.less_equal`.
  2266. """,
  2267. )
  2268. add_docstr_all(
  2269. "lerp",
  2270. r"""
  2271. lerp(end, weight) -> Tensor
  2272. See :func:`torch.lerp`
  2273. """,
  2274. )
  2275. add_docstr_all(
  2276. "lerp_",
  2277. r"""
  2278. lerp_(end, weight) -> Tensor
  2279. In-place version of :meth:`~Tensor.lerp`
  2280. """,
  2281. )
  2282. add_docstr_all(
  2283. "lgamma",
  2284. r"""
  2285. lgamma() -> Tensor
  2286. See :func:`torch.lgamma`
  2287. """,
  2288. )
  2289. add_docstr_all(
  2290. "lgamma_",
  2291. r"""
  2292. lgamma_() -> Tensor
  2293. In-place version of :meth:`~Tensor.lgamma`
  2294. """,
  2295. )
  2296. add_docstr_all(
  2297. "log",
  2298. r"""
  2299. log() -> Tensor
  2300. See :func:`torch.log`
  2301. """,
  2302. )
  2303. add_docstr_all(
  2304. "log_",
  2305. r"""
  2306. log_() -> Tensor
  2307. In-place version of :meth:`~Tensor.log`
  2308. """,
  2309. )
  2310. add_docstr_all(
  2311. "log10",
  2312. r"""
  2313. log10() -> Tensor
  2314. See :func:`torch.log10`
  2315. """,
  2316. )
  2317. add_docstr_all(
  2318. "log10_",
  2319. r"""
  2320. log10_() -> Tensor
  2321. In-place version of :meth:`~Tensor.log10`
  2322. """,
  2323. )
  2324. add_docstr_all(
  2325. "log1p",
  2326. r"""
  2327. log1p() -> Tensor
  2328. See :func:`torch.log1p`
  2329. """,
  2330. )
  2331. add_docstr_all(
  2332. "log1p_",
  2333. r"""
  2334. log1p_() -> Tensor
  2335. In-place version of :meth:`~Tensor.log1p`
  2336. """,
  2337. )
  2338. add_docstr_all(
  2339. "log2",
  2340. r"""
  2341. log2() -> Tensor
  2342. See :func:`torch.log2`
  2343. """,
  2344. )
  2345. add_docstr_all(
  2346. "log2_",
  2347. r"""
  2348. log2_() -> Tensor
  2349. In-place version of :meth:`~Tensor.log2`
  2350. """,
  2351. )
  2352. add_docstr_all(
  2353. "logaddexp",
  2354. r"""
  2355. logaddexp(other) -> Tensor
  2356. See :func:`torch.logaddexp`
  2357. """,
  2358. )
  2359. add_docstr_all(
  2360. "logaddexp2",
  2361. r"""
  2362. logaddexp2(other) -> Tensor
  2363. See :func:`torch.logaddexp2`
  2364. """,
  2365. )
  2366. add_docstr_all(
  2367. "log_normal_",
  2368. r"""
  2369. log_normal_(mean=1, std=2, *, generator=None)
  2370. Fills :attr:`self` tensor with numbers samples from the log-normal distribution
  2371. parameterized by the given mean :math:`\mu` and standard deviation
  2372. :math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
  2373. standard deviation of the underlying normal distribution, and not of the
  2374. returned distribution:
  2375. .. math::
  2376. f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
  2377. """,
  2378. )
  2379. add_docstr_all(
  2380. "logsumexp",
  2381. r"""
  2382. logsumexp(dim, keepdim=False) -> Tensor
  2383. See :func:`torch.logsumexp`
  2384. """,
  2385. )
  2386. add_docstr_all(
  2387. "lt",
  2388. r"""
  2389. lt(other) -> Tensor
  2390. See :func:`torch.lt`.
  2391. """,
  2392. )
  2393. add_docstr_all(
  2394. "lt_",
  2395. r"""
  2396. lt_(other) -> Tensor
  2397. In-place version of :meth:`~Tensor.lt`.
  2398. """,
  2399. )
  2400. add_docstr_all(
  2401. "less",
  2402. r"""
  2403. lt(other) -> Tensor
  2404. See :func:`torch.less`.
  2405. """,
  2406. )
  2407. add_docstr_all(
  2408. "less_",
  2409. r"""
  2410. less_(other) -> Tensor
  2411. In-place version of :meth:`~Tensor.less`.
  2412. """,
  2413. )
  2414. add_docstr_all(
  2415. "lu_solve",
  2416. r"""
  2417. lu_solve(LU_data, LU_pivots) -> Tensor
  2418. See :func:`torch.lu_solve`
  2419. """,
  2420. )
  2421. add_docstr_all(
  2422. "map_",
  2423. r"""
  2424. map_(tensor, callable)
  2425. Applies :attr:`callable` for each element in :attr:`self` tensor and the given
  2426. :attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
  2427. the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
  2428. The :attr:`callable` should have the signature::
  2429. def callable(a, b) -> number
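
Example (a small sketch on CPU tensors; the Python callable is applied
elementwise, which is convenient but slow)::

    >>> a = torch.tensor([1.0, 2.0, 3.0])
    >>> b = torch.tensor([10.0, 20.0, 30.0])
    >>> a.map_(b, lambda x, y: x + y)
    tensor([11., 22., 33.])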
  2430. """,
  2431. )
  2432. add_docstr_all(
  2433. "masked_scatter_",
  2434. r"""
  2435. masked_scatter_(mask, source)
  2436. Copies elements from :attr:`source` into :attr:`self` tensor at positions where
  2437. the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
  2438. starting at position 0 of :attr:`source` and continuing in order one-by-one for each
  2439. occurrence of :attr:`mask` being True.
  2440. The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
  2441. with the shape of the underlying tensor. The :attr:`source` should have at least
  2442. as many elements as the number of ones in :attr:`mask`.
  2443. Args:
  2444. mask (BoolTensor): the boolean mask
  2445. source (Tensor): the tensor to copy from
  2446. .. note::
  2447. The :attr:`mask` operates on the :attr:`self` tensor, not on the given
  2448. :attr:`source` tensor.
  2449. Example:
  2450. >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
  2451. >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]])
  2452. >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
  2453. >>> self.masked_scatter_(mask, source)
  2454. tensor([[0, 0, 0, 0, 1],
  2455. [2, 3, 0, 4, 5]])
  2456. """,
  2457. )
  2458. add_docstr_all(
  2459. "masked_fill_",
  2460. r"""
  2461. masked_fill_(mask, value)
  2462. Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
  2463. True. The shape of :attr:`mask` must be
  2464. :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
  2465. tensor.
  2466. Args:
  2467. mask (BoolTensor): the boolean mask
  2468. value (float): the value to fill in with
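
Example (a small sketch)::

    >>> x = torch.zeros(2, 3)
    >>> m = torch.tensor([[True, False, True], [False, True, False]])
    >>> x.masked_fill_(m, 1.0)
    tensor([[1., 0., 1.],
            [0., 1., 0.]])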
  2469. """,
  2470. )
  2471. add_docstr_all(
  2472. "masked_select",
  2473. r"""
  2474. masked_select(mask) -> Tensor
  2475. See :func:`torch.masked_select`
  2476. """,
  2477. )
  2478. add_docstr_all(
  2479. "matrix_power",
  2480. r"""
  2481. matrix_power(n) -> Tensor
  2482. .. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
  2483. Alias for :func:`torch.linalg.matrix_power`
  2484. """,
  2485. )
  2486. add_docstr_all(
  2487. "matrix_exp",
  2488. r"""
  2489. matrix_exp() -> Tensor
  2490. See :func:`torch.matrix_exp`
  2491. """,
  2492. )
  2493. add_docstr_all(
  2494. "max",
  2495. r"""
  2496. max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  2497. See :func:`torch.max`
  2498. """,
  2499. )
  2500. add_docstr_all(
  2501. "amax",
  2502. r"""
  2503. amax(dim=None, keepdim=False) -> Tensor
  2504. See :func:`torch.amax`
  2505. """,
  2506. )
  2507. add_docstr_all(
  2508. "maximum",
  2509. r"""
  2510. maximum(other) -> Tensor
  2511. See :func:`torch.maximum`
  2512. """,
  2513. )
  2514. add_docstr_all(
  2515. "fmax",
  2516. r"""
  2517. fmax(other) -> Tensor
  2518. See :func:`torch.fmax`
  2519. """,
  2520. )
  2521. add_docstr_all(
  2522. "argmax",
  2523. r"""
  2524. argmax(dim=None, keepdim=False) -> LongTensor
  2525. See :func:`torch.argmax`
  2526. """,
  2527. )
  2528. add_docstr_all(
  2529. "argwhere",
  2530. r"""
  2531. argwhere() -> Tensor
  2532. See :func:`torch.argwhere`
  2533. """,
  2534. )
  2535. add_docstr_all(
  2536. "mean",
  2537. r"""
  2538. mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  2539. See :func:`torch.mean`
  2540. """,
  2541. )
  2542. add_docstr_all(
  2543. "nanmean",
  2544. r"""
  2545. nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  2546. See :func:`torch.nanmean`
  2547. """,
  2548. )
  2549. add_docstr_all(
  2550. "median",
  2551. r"""
  2552. median(dim=None, keepdim=False) -> (Tensor, LongTensor)
  2553. See :func:`torch.median`
  2554. """,
  2555. )
  2556. add_docstr_all(
  2557. "nanmedian",
  2558. r"""
  2559. nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
  2560. See :func:`torch.nanmedian`
  2561. """,
  2562. )
  2563. add_docstr_all(
  2564. "min",
  2565. r"""
  2566. min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  2567. See :func:`torch.min`
  2568. """,
  2569. )
  2570. add_docstr_all(
  2571. "amin",
  2572. r"""
  2573. amin(dim=None, keepdim=False) -> Tensor
  2574. See :func:`torch.amin`
  2575. """,
  2576. )
  2577. add_docstr_all(
  2578. "minimum",
  2579. r"""
  2580. minimum(other) -> Tensor
  2581. See :func:`torch.minimum`
  2582. """,
  2583. )
  2584. add_docstr_all(
  2585. "aminmax",
  2586. r"""
  2587. aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
  2588. See :func:`torch.aminmax`
  2589. """,
  2590. )
  2591. add_docstr_all(
  2592. "fmin",
  2593. r"""
  2594. fmin(other) -> Tensor
  2595. See :func:`torch.fmin`
  2596. """,
  2597. )
  2598. add_docstr_all(
  2599. "argmin",
  2600. r"""
  2601. argmin(dim=None, keepdim=False) -> LongTensor
  2602. See :func:`torch.argmin`
  2603. """,
  2604. )
  2605. add_docstr_all(
  2606. "mm",
  2607. r"""
  2608. mm(mat2) -> Tensor
  2609. See :func:`torch.mm`
  2610. """,
  2611. )
  2612. add_docstr_all(
  2613. "mode",
  2614. r"""
  2615. mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
  2616. See :func:`torch.mode`
  2617. """,
  2618. )
  2619. add_docstr_all(
  2620. "movedim",
  2621. r"""
  2622. movedim(source, destination) -> Tensor
  2623. See :func:`torch.movedim`
  2624. """,
  2625. )
  2626. add_docstr_all(
  2627. "moveaxis",
  2628. r"""
  2629. moveaxis(source, destination) -> Tensor
  2630. See :func:`torch.moveaxis`
  2631. """,
  2632. )
  2633. add_docstr_all(
  2634. "mul",
  2635. r"""
  2636. mul(value) -> Tensor
  2637. See :func:`torch.mul`.
  2638. """,
  2639. )
  2640. add_docstr_all(
  2641. "mul_",
  2642. r"""
  2643. mul_(value) -> Tensor
  2644. In-place version of :meth:`~Tensor.mul`.
  2645. """,
  2646. )
  2647. add_docstr_all(
  2648. "multiply",
  2649. r"""
  2650. multiply(value) -> Tensor
  2651. See :func:`torch.multiply`.
  2652. """,
  2653. )
  2654. add_docstr_all(
  2655. "multiply_",
  2656. r"""
  2657. multiply_(value) -> Tensor
  2658. In-place version of :meth:`~Tensor.multiply`.
  2659. """,
  2660. )
  2661. add_docstr_all(
  2662. "multinomial",
  2663. r"""
  2664. multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
  2665. See :func:`torch.multinomial`
  2666. """,
  2667. )
  2668. add_docstr_all(
  2669. "mv",
  2670. r"""
  2671. mv(vec) -> Tensor
  2672. See :func:`torch.mv`
  2673. """,
  2674. )
  2675. add_docstr_all(
  2676. "mvlgamma",
  2677. r"""
  2678. mvlgamma(p) -> Tensor
  2679. See :func:`torch.mvlgamma`
  2680. """,
  2681. )
  2682. add_docstr_all(
  2683. "mvlgamma_",
  2684. r"""
  2685. mvlgamma_(p) -> Tensor
  2686. In-place version of :meth:`~Tensor.mvlgamma`
  2687. """,
  2688. )
  2689. add_docstr_all(
  2690. "narrow",
  2691. r"""
  2692. narrow(dimension, start, length) -> Tensor
  2693. See :func:`torch.narrow`.
  2694. """,
  2695. )
  2696. add_docstr_all(
  2697. "narrow_copy",
  2698. r"""
  2699. narrow_copy(dimension, start, length) -> Tensor
  2700. See :func:`torch.narrow_copy`.
  2701. """,
  2702. )
  2703. add_docstr_all(
  2704. "ndimension",
  2705. r"""
  2706. ndimension() -> int
  2707. Alias for :meth:`~Tensor.dim()`
  2708. """,
  2709. )
  2710. add_docstr_all(
  2711. "nan_to_num",
  2712. r"""
  2713. nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
  2714. See :func:`torch.nan_to_num`.
  2715. """,
  2716. )
  2717. add_docstr_all(
  2718. "nan_to_num_",
  2719. r"""
  2720. nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
  2721. In-place version of :meth:`~Tensor.nan_to_num`.
  2722. """,
  2723. )
  2724. add_docstr_all(
  2725. "ne",
  2726. r"""
  2727. ne(other) -> Tensor
  2728. See :func:`torch.ne`.
  2729. """,
  2730. )
  2731. add_docstr_all(
  2732. "ne_",
  2733. r"""
  2734. ne_(other) -> Tensor
  2735. In-place version of :meth:`~Tensor.ne`.
  2736. """,
  2737. )
  2738. add_docstr_all(
  2739. "not_equal",
  2740. r"""
  2741. not_equal(other) -> Tensor
  2742. See :func:`torch.not_equal`.
  2743. """,
  2744. )
  2745. add_docstr_all(
  2746. "not_equal_",
  2747. r"""
  2748. not_equal_(other) -> Tensor
  2749. In-place version of :meth:`~Tensor.not_equal`.
  2750. """,
  2751. )
  2752. add_docstr_all(
  2753. "neg",
  2754. r"""
  2755. neg() -> Tensor
  2756. See :func:`torch.neg`
  2757. """,
  2758. )
  2759. add_docstr_all(
  2760. "negative",
  2761. r"""
  2762. negative() -> Tensor
  2763. See :func:`torch.negative`
  2764. """,
  2765. )
  2766. add_docstr_all(
  2767. "neg_",
  2768. r"""
  2769. neg_() -> Tensor
  2770. In-place version of :meth:`~Tensor.neg`
  2771. """,
  2772. )
  2773. add_docstr_all(
  2774. "negative_",
  2775. r"""
  2776. negative_() -> Tensor
  2777. In-place version of :meth:`~Tensor.negative`
  2778. """,
  2779. )
  2780. add_docstr_all(
  2781. "nelement",
  2782. r"""
  2783. nelement() -> int
  2784. Alias for :meth:`~Tensor.numel`
  2785. """,
  2786. )
  2787. add_docstr_all(
  2788. "nextafter",
  2789. r"""
  2790. nextafter(other) -> Tensor
  2791. See :func:`torch.nextafter`
  2792. """,
  2793. )
  2794. add_docstr_all(
  2795. "nextafter_",
  2796. r"""
  2797. nextafter_(other) -> Tensor
  2798. In-place version of :meth:`~Tensor.nextafter`
  2799. """,
  2800. )
  2801. add_docstr_all(
  2802. "nonzero",
  2803. r"""
  2804. nonzero() -> LongTensor
  2805. See :func:`torch.nonzero`
  2806. """,
  2807. )
  2808. add_docstr_all(
  2809. "norm",
  2810. r"""
  2811. norm(p=2, dim=None, keepdim=False) -> Tensor
  2812. See :func:`torch.norm`
  2813. """,
  2814. )
  2815. add_docstr_all(
  2816. "normal_",
  2817. r"""
  2818. normal_(mean=0, std=1, *, generator=None) -> Tensor
  2819. Fills :attr:`self` tensor with elements samples from the normal distribution
  2820. parameterized by :attr:`mean` and :attr:`std`.
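
Example (a small sketch; the draws are random, so only shape and dtype are
checked rather than exact values)::

    >>> t = torch.empty(2, 3)
    >>> t.normal_(mean=0.0, std=1.0).shape
    torch.Size([2, 3])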
  2821. """,
  2822. )
  2823. add_docstr_all(
  2824. "numel",
  2825. r"""
  2826. numel() -> int
  2827. See :func:`torch.numel`
  2828. """,
  2829. )
  2830. add_docstr_all(
  2831. "numpy",
  2832. r"""
  2833. numpy(*, force=False) -> numpy.ndarray
  2834. Returns the tensor as a NumPy :class:`ndarray`.
  2835. If :attr:`force` is ``False`` (the default), the conversion
  2836. is performed only if the tensor is on the CPU, does not require grad,
  2837. does not have its conjugate bit set, and is a dtype and layout that
  2838. NumPy supports. The returned ndarray and the tensor will share their
  2839. storage, so changes to the tensor will be reflected in the ndarray
  2840. and vice versa.
  2841. If :attr:`force` is ``True`` this is equivalent to
  2842. calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
  2843. If the tensor isn't on the CPU or the conjugate or negative bit is set,
  2844. the tensor won't share its storage with the returned ndarray.
  2845. Setting :attr:`force` to ``True`` can be a useful shorthand.
  2846. Args:
  2847. force (bool): if ``True``, the ndarray may be a copy of the tensor
  2848. instead of always sharing memory, defaults to ``False``.
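
Example (a small sketch; on CPU the ndarray and the tensor share memory)::

    >>> t = torch.tensor([1, 2, 3])
    >>> a = t.numpy()
    >>> a
    array([1, 2, 3])
    >>> a[0] = 9
    >>> t
    tensor([9, 2, 3])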
  2849. """,
  2850. )
  2851. add_docstr_all(
  2852. "orgqr",
  2853. r"""
  2854. orgqr(input2) -> Tensor
  2855. See :func:`torch.orgqr`
  2856. """,
  2857. )
  2858. add_docstr_all(
  2859. "ormqr",
  2860. r"""
  2861. ormqr(input2, input3, left=True, transpose=False) -> Tensor
  2862. See :func:`torch.ormqr`
  2863. """,
  2864. )
  2865. add_docstr_all(
  2866. "permute",
  2867. r"""
  2868. permute(*dims) -> Tensor
  2869. See :func:`torch.permute`
  2870. """,
  2871. )
  2872. add_docstr_all(
  2873. "polygamma",
  2874. r"""
  2875. polygamma(n) -> Tensor
  2876. See :func:`torch.polygamma`
  2877. """,
  2878. )
  2879. add_docstr_all(
  2880. "polygamma_",
  2881. r"""
  2882. polygamma_(n) -> Tensor
  2883. In-place version of :meth:`~Tensor.polygamma`
  2884. """,
  2885. )
  2886. add_docstr_all(
  2887. "positive",
  2888. r"""
  2889. positive() -> Tensor
  2890. See :func:`torch.positive`
  2891. """,
  2892. )
  2893. add_docstr_all(
  2894. "pow",
  2895. r"""
  2896. pow(exponent) -> Tensor
  2897. See :func:`torch.pow`
  2898. """,
  2899. )
  2900. add_docstr_all(
  2901. "pow_",
  2902. r"""
  2903. pow_(exponent) -> Tensor
  2904. In-place version of :meth:`~Tensor.pow`
  2905. """,
  2906. )
  2907. add_docstr_all(
  2908. "float_power",
  2909. r"""
  2910. float_power(exponent) -> Tensor
  2911. See :func:`torch.float_power`
  2912. """,
  2913. )
  2914. add_docstr_all(
  2915. "float_power_",
  2916. r"""
  2917. float_power_(exponent) -> Tensor
  2918. In-place version of :meth:`~Tensor.float_power`
  2919. """,
  2920. )
  2921. add_docstr_all(
  2922. "prod",
  2923. r"""
  2924. prod(dim=None, keepdim=False, dtype=None) -> Tensor
  2925. See :func:`torch.prod`
  2926. """,
  2927. )
  2928. add_docstr_all(
  2929. "put_",
  2930. r"""
  2931. put_(index, source, accumulate=False) -> Tensor
  2932. Copies the elements from :attr:`source` into the positions specified by
  2933. :attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
  2934. it were a 1-D tensor.
  2935. :attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
  2936. the same shape.
  2937. If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
  2938. :attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
  2939. contain duplicate elements.
  2940. Args:
  2941. index (LongTensor): the indices into self
  2942. source (Tensor): the tensor containing values to copy from
  2943. accumulate (bool): whether to accumulate into self
  2944. Example::
  2945. >>> src = torch.tensor([[4, 3, 5],
  2946. ... [6, 7, 8]])
  2947. >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
  2948. tensor([[ 4, 9, 5],
  2949. [ 10, 7, 8]])
  2950. """,
  2951. )
  2952. add_docstr_all(
  2953. "put",
  2954. r"""
  2955. put(input, index, source, accumulate=False) -> Tensor
  2956. Out-of-place version of :meth:`torch.Tensor.put_`.
  2957. `input` corresponds to `self` in :meth:`torch.Tensor.put_`.
  2958. """,
  2959. )
  2960. add_docstr_all(
  2961. "qr",
  2962. r"""
  2963. qr(some=True) -> (Tensor, Tensor)
  2964. See :func:`torch.qr`
  2965. """,
  2966. )
  2967. add_docstr_all(
  2968. "qscheme",
  2969. r"""
  2970. qscheme() -> torch.qscheme
  2971. Returns the quantization scheme of a given QTensor.
  2972. """,
  2973. )
  2974. add_docstr_all(
  2975. "quantile",
  2976. r"""
  2977. quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  2978. See :func:`torch.quantile`
  2979. """,
  2980. )
  2981. add_docstr_all(
  2982. "nanquantile",
  2983. r"""
  2984. nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  2985. See :func:`torch.nanquantile`
  2986. """,
  2987. )
  2988. add_docstr_all(
  2989. "q_scale",
  2990. r"""
  2991. q_scale() -> float
  2992. Given a Tensor quantized by linear(affine) quantization,
  2993. returns the scale of the underlying quantizer().
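
Example (a small sketch using per-tensor affine quantization)::

    >>> q = torch.quantize_per_tensor(torch.tensor([1.0, 2.0]),
    ...                               scale=0.1, zero_point=0, dtype=torch.qint8)
    >>> q.q_scale()
    0.1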
  2994. """,
  2995. )
  2996. add_docstr_all(
  2997. "q_zero_point",
  2998. r"""
  2999. q_zero_point() -> int
  3000. Given a Tensor quantized by linear(affine) quantization,
  3001. returns the zero_point of the underlying quantizer().
  3002. """,
  3003. )
  3004. add_docstr_all(
  3005. "q_per_channel_scales",
  3006. r"""
  3007. q_per_channel_scales() -> Tensor
  3008. Given a Tensor quantized by linear (affine) per-channel quantization,
  3009. returns a Tensor of scales of the underlying quantizer. It has the number of
  3010. elements that matches the corresponding dimensions (from q_per_channel_axis) of
  3011. the tensor.
  3012. """,
  3013. )
  3014. add_docstr_all(
  3015. "q_per_channel_zero_points",
  3016. r"""
  3017. q_per_channel_zero_points() -> Tensor
  3018. Given a Tensor quantized by linear (affine) per-channel quantization,
  3019. returns a tensor of zero_points of the underlying quantizer. It has the number of
  3020. elements that matches the corresponding dimensions (from q_per_channel_axis) of
  3021. the tensor.
  3022. """,
  3023. )
  3024. add_docstr_all(
  3025. "q_per_channel_axis",
  3026. r"""
  3027. q_per_channel_axis() -> int
  3028. Given a Tensor quantized by linear (affine) per-channel quantization,
  3029. returns the index of dimension on which per-channel quantization is applied.
  3030. """,
  3031. )
  3032. add_docstr_all(
  3033. "random_",
  3034. r"""
  3035. random_(from=0, to=None, *, generator=None) -> Tensor
  3036. Fills :attr:`self` tensor with numbers sampled from the discrete uniform
  3037. distribution over ``[from, to - 1]``. If not specified, the values are usually
  3038. only bounded by :attr:`self` tensor's data type. However, for floating point
  3039. types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
  3040. value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
  3041. will be uniform in ``[0, 2^53]``.
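
Example (a small sketch; the draws are random, so only the bounds are
checked rather than exact values)::

    >>> t = torch.empty(100, dtype=torch.int64)
    >>> _ = t.random_(0, 10)
    >>> bool(((t >= 0) & (t < 10)).all())
    True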
  3042. """,
  3043. )
  3044. add_docstr_all(
  3045. "rad2deg",
  3046. r"""
  3047. rad2deg() -> Tensor
  3048. See :func:`torch.rad2deg`
  3049. """,
  3050. )
  3051. add_docstr_all(
  3052. "rad2deg_",
  3053. r"""
  3054. rad2deg_() -> Tensor
  3055. In-place version of :meth:`~Tensor.rad2deg`
  3056. """,
  3057. )
  3058. add_docstr_all(
  3059. "deg2rad",
  3060. r"""
  3061. deg2rad() -> Tensor
  3062. See :func:`torch.deg2rad`
  3063. """,
  3064. )
  3065. add_docstr_all(
  3066. "deg2rad_",
  3067. r"""
  3068. deg2rad_() -> Tensor
  3069. In-place version of :meth:`~Tensor.deg2rad`
  3070. """,
  3071. )
  3072. add_docstr_all(
  3073. "ravel",
  3074. r"""
  3075. ravel() -> Tensor
  3076. see :func:`torch.ravel`
  3077. """,
  3078. )
  3079. add_docstr_all(
  3080. "reciprocal",
  3081. r"""
  3082. reciprocal() -> Tensor
  3083. See :func:`torch.reciprocal`
  3084. """,
  3085. )
  3086. add_docstr_all(
  3087. "reciprocal_",
  3088. r"""
  3089. reciprocal_() -> Tensor
  3090. In-place version of :meth:`~Tensor.reciprocal`
  3091. """,
  3092. )
  3093. add_docstr_all(
  3094. "record_stream",
  3095. r"""
  3096. record_stream(stream)
  3097. Ensures that the tensor memory is not reused for another tensor until all
  3098. current work queued on :attr:`stream` are complete.
  3099. .. note::
  3100. The caching allocator is aware of only the stream where a tensor was
  3101. allocated. Due to the awareness, it already correctly manages the life
  3102. cycle of tensors on only one stream. But if a tensor is used on a stream
  3103. different from the stream of origin, the allocator might reuse the memory
  3104. unexpectedly. Calling this method lets the allocator know which streams
  3105. have used the tensor.
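
Example (an illustrative sketch, not a prescription; it requires a CUDA
device, and the names ``t``, ``s`` and ``out`` are hypothetical)::

    >>> t = torch.empty(4, device='cuda')  # allocated on the current stream
    >>> s = torch.cuda.Stream()
    >>> with torch.cuda.stream(s):
    ...     out = t * 2                    # t is consumed on side stream ``s``
    >>> t.record_stream(s)                 # keep t's memory alive until ``s`` finishes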
  3106. """,
  3107. )
  3108. add_docstr_all(
  3109. "remainder",
  3110. r"""
  3111. remainder(divisor) -> Tensor
  3112. See :func:`torch.remainder`
  3113. """,
  3114. )
  3115. add_docstr_all(
  3116. "remainder_",
  3117. r"""
  3118. remainder_(divisor) -> Tensor
  3119. In-place version of :meth:`~Tensor.remainder`
  3120. """,
  3121. )
  3122. add_docstr_all(
  3123. "renorm",
  3124. r"""
  3125. renorm(p, dim, maxnorm) -> Tensor
  3126. See :func:`torch.renorm`
  3127. """,
  3128. )
  3129. add_docstr_all(
  3130. "renorm_",
  3131. r"""
  3132. renorm_(p, dim, maxnorm) -> Tensor
  3133. In-place version of :meth:`~Tensor.renorm`
  3134. """,
  3135. )
  3136. add_docstr_all(
  3137. "repeat",
  3138. r"""
  3139. repeat(*sizes) -> Tensor
  3140. Repeats this tensor along the specified dimensions.
  3141. Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
  3142. .. warning::
  3143. :meth:`~Tensor.repeat` behaves differently from
  3144. `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
  3145. but is more similar to
  3146. `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
  3147. For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
  3148. Args:
  3149. sizes (torch.Size or int...): The number of times to repeat this tensor along each
  3150. dimension
  3151. Example::
  3152. >>> x = torch.tensor([1, 2, 3])
  3153. >>> x.repeat(4, 2)
  3154. tensor([[ 1, 2, 3, 1, 2, 3],
  3155. [ 1, 2, 3, 1, 2, 3],
  3156. [ 1, 2, 3, 1, 2, 3],
  3157. [ 1, 2, 3, 1, 2, 3]])
  3158. >>> x.repeat(4, 2, 1).size()
  3159. torch.Size([4, 2, 3])
  3160. """,
  3161. )
  3162. add_docstr_all(
  3163. "repeat_interleave",
  3164. r"""
  3165. repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
  3166. See :func:`torch.repeat_interleave`.
  3167. """,
  3168. )
  3169. add_docstr_all(
  3170. "requires_grad_",
  3171. r"""
  3172. requires_grad_(requires_grad=True) -> Tensor
  3173. Change if autograd should record operations on this tensor: sets this tensor's
  3174. :attr:`requires_grad` attribute in-place. Returns this tensor.
  3175. :func:`requires_grad_`'s main use case is to tell autograd to begin recording
  3176. operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
  3177. (because it was obtained through a DataLoader, or required preprocessing or
  3178. initialization), ``tensor.requires_grad_()`` makes it so that autograd will
  3179. begin to record operations on ``tensor``.
  3180. Args:
  3181. requires_grad (bool): If autograd should record operations on this tensor.
  3182. Default: ``True``.
  3183. Example::
  3184. >>> # Let's say we want to preprocess some saved weights and use
  3185. >>> # the result as new weights.
  3186. >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
  3187. >>> loaded_weights = torch.tensor(saved_weights)
  3188. >>> weights = preprocess(loaded_weights) # some function
  3189. >>> weights
  3190. tensor([-0.5503, 0.4926, -2.1158, -0.8303])
  3191. >>> # Now, start to record operations done to weights
  3192. >>> weights.requires_grad_()
  3193. >>> out = weights.pow(2).sum()
  3194. >>> out.backward()
  3195. >>> weights.grad
  3196. tensor([-1.1007, 0.9853, -4.2316, -1.6606])
  3197. """,
  3198. )
  3199. add_docstr_all(
  3200. "reshape",
  3201. r"""
  3202. reshape(*shape) -> Tensor
  3203. Returns a tensor with the same data and number of elements as :attr:`self`
  3204. but with the specified shape. This method returns a view if :attr:`shape` is
  3205. compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
  3206. possible to return a view.
  3207. See :func:`torch.reshape`
  3208. Args:
  3209. shape (tuple of ints or int...): the desired shape
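Example (an illustrative sketch)::
>>> x = torch.arange(4)
>>> x.reshape(2, 2)
tensor([[0, 1],
[2, 3]])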
  3210. """,
  3211. )
  3212. add_docstr_all(
  3213. "reshape_as",
  3214. r"""
  3215. reshape_as(other) -> Tensor
Returns this tensor with the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.size())``.
This method returns a view if ``other.size()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
  3220. Please see :meth:`reshape` for more information about ``reshape``.
  3221. Args:
  3222. other (:class:`torch.Tensor`): The result tensor has the same shape
  3223. as :attr:`other`.
  3224. """,
  3225. )
  3226. add_docstr_all(
  3227. "resize_",
  3228. r"""
  3229. resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
  3230. Resizes :attr:`self` tensor to the specified size. If the number of elements is
  3231. larger than the current storage size, then the underlying storage is resized
  3232. to fit the new number of elements. If the number of elements is smaller, the
  3233. underlying storage is not changed. Existing elements are preserved but any new
  3234. memory is uninitialized.
  3235. .. warning::
  3236. This is a low-level method. The storage is reinterpreted as C-contiguous,
  3237. ignoring the current strides (unless the target size equals the current
  3238. size, in which case the tensor is left unchanged). For most purposes, you
  3239. will instead want to use :meth:`~Tensor.view()`, which checks for
  3240. contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
  3241. change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
  3242. Args:
  3243. sizes (torch.Size or int...): the desired size
  3244. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3245. Tensor. Default: ``torch.contiguous_format``. Note that memory format of
  3246. :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.
  3247. Example::
  3248. >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
  3249. >>> x.resize_(2, 2)
  3250. tensor([[ 1, 2],
  3251. [ 3, 4]])
  3252. """,
  3253. )
  3254. add_docstr_all(
  3255. "resize_as_",
  3256. r"""
  3257. resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
  3258. Resizes the :attr:`self` tensor to be the same size as the specified
  3259. :attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
  3260. Args:
  3261. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3262. Tensor. Default: ``torch.contiguous_format``. Note that memory format of
  3263. :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
  3264. """,
  3265. )
  3266. add_docstr_all(
  3267. "rot90",
  3268. r"""
  3269. rot90(k, dims) -> Tensor
  3270. See :func:`torch.rot90`
  3271. """,
  3272. )
  3273. add_docstr_all(
  3274. "round",
  3275. r"""
  3276. round(decimals=0) -> Tensor
  3277. See :func:`torch.round`
  3278. """,
  3279. )
  3280. add_docstr_all(
  3281. "round_",
  3282. r"""
  3283. round_(decimals=0) -> Tensor
  3284. In-place version of :meth:`~Tensor.round`
  3285. """,
  3286. )
  3287. add_docstr_all(
  3288. "rsqrt",
  3289. r"""
  3290. rsqrt() -> Tensor
  3291. See :func:`torch.rsqrt`
  3292. """,
  3293. )
  3294. add_docstr_all(
  3295. "rsqrt_",
  3296. r"""
  3297. rsqrt_() -> Tensor
  3298. In-place version of :meth:`~Tensor.rsqrt`
  3299. """,
  3300. )
  3301. add_docstr_all(
  3302. "scatter_",
  3303. r"""
  3304. scatter_(dim, index, src, reduce=None) -> Tensor
  3305. Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
  3306. specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
  3307. index is specified by its index in :attr:`src` for ``dimension != dim`` and by
  3308. the corresponding value in :attr:`index` for ``dimension = dim``.
  3309. For a 3-D tensor, :attr:`self` is updated as::
  3310. self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
  3311. self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
  3312. self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
  3313. This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
  3314. :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
  3315. the same number of dimensions. It is also required that
  3316. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  3317. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  3318. Note that ``index`` and ``src`` do not broadcast.
  3319. Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
  3320. between ``0`` and ``self.size(dim) - 1`` inclusive.
  3321. .. warning::
  3322. When indices are not unique, the behavior is non-deterministic (one of the
  3323. values from ``src`` will be picked arbitrarily) and the gradient will be
  3324. incorrect (it will be propagated to all locations in the source that
  3325. correspond to the same index)!
  3326. .. note::
  3327. The backward pass is implemented only for ``src.shape == index.shape``.
Additionally accepts an optional :attr:`reduce` argument that specifies a
reduction operation to apply to all values from the tensor :attr:`src` as
they are scattered into :attr:`self` at the indices
specified in :attr:`index`. For each value in :attr:`src`, the reduction
operation is applied to an index in :attr:`self` which is specified by
its index in :attr:`src` for ``dimension != dim`` and by the corresponding
value in :attr:`index` for ``dimension = dim``.
  3335. Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
  3336. is updated as::
  3337. self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
  3338. self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
  3339. self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
  3340. Reducing with the addition operation is the same as using
  3341. :meth:`~torch.Tensor.scatter_add_`.
  3342. .. warning::
  3343. The reduce argument with Tensor ``src`` is deprecated and will be removed in
  3344. a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
  3345. instead for more reduction options.
  3346. Args:
  3347. dim (int): the axis along which to index
  3348. index (LongTensor): the indices of elements to scatter, can be either empty
  3349. or of the same dimensionality as ``src``. When empty, the operation
  3350. returns ``self`` unchanged.
  3351. src (Tensor or float): the source element(s) to scatter.
  3352. reduce (str, optional): reduction operation to apply, can be either
  3353. ``'add'`` or ``'multiply'``.
  3354. Example::
  3355. >>> src = torch.arange(1, 11).reshape((2, 5))
  3356. >>> src
  3357. tensor([[ 1, 2, 3, 4, 5],
  3358. [ 6, 7, 8, 9, 10]])
  3359. >>> index = torch.tensor([[0, 1, 2, 0]])
  3360. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
  3361. tensor([[1, 0, 0, 4, 0],
  3362. [0, 2, 0, 0, 0],
  3363. [0, 0, 3, 0, 0]])
  3364. >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
  3365. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
  3366. tensor([[1, 2, 3, 0, 0],
  3367. [6, 7, 0, 0, 8],
  3368. [0, 0, 0, 0, 0]])
  3369. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  3370. ... 1.23, reduce='multiply')
  3371. tensor([[2.0000, 2.0000, 2.4600, 2.0000],
  3372. [2.0000, 2.0000, 2.0000, 2.4600]])
  3373. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  3374. ... 1.23, reduce='add')
  3375. tensor([[2.0000, 2.0000, 3.2300, 2.0000],
  3376. [2.0000, 2.0000, 2.0000, 3.2300]])
  3377. """,
  3378. )
  3379. add_docstr_all(
  3380. "scatter_add_",
  3381. r"""
  3382. scatter_add_(dim, index, src) -> Tensor
  3383. Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
  3384. specified in the :attr:`index` tensor in a similar fashion as
  3385. :meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
  3386. an index in :attr:`self` which is specified by its index in :attr:`src`
  3387. for ``dimension != dim`` and by the corresponding value in :attr:`index` for
  3388. ``dimension = dim``.
  3389. For a 3-D tensor, :attr:`self` is updated as::
  3390. self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
  3391. self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
  3392. self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
  3393. :attr:`self`, :attr:`index` and :attr:`src` should have same number of
  3394. dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
  3395. dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
  3396. ``d != dim``. Note that ``index`` and ``src`` do not broadcast.
  3397. Note:
  3398. {forward_reproducibility_note}
  3399. .. note::
  3400. The backward pass is implemented only for ``src.shape == index.shape``.
  3401. Args:
  3402. dim (int): the axis along which to index
  3403. index (LongTensor): the indices of elements to scatter and add, can be
  3404. either empty or of the same dimensionality as ``src``. When empty, the
  3405. operation returns ``self`` unchanged.
  3406. src (Tensor): the source elements to scatter and add
  3407. Example::
  3408. >>> src = torch.ones((2, 5))
  3409. >>> index = torch.tensor([[0, 1, 2, 0, 0]])
  3410. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
  3411. tensor([[1., 0., 0., 1., 1.],
  3412. [0., 1., 0., 0., 0.],
  3413. [0., 0., 1., 0., 0.]])
  3414. >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
  3415. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
  3416. tensor([[2., 0., 0., 1., 1.],
  3417. [0., 2., 0., 0., 0.],
  3418. [0., 0., 2., 1., 1.]])
  3419. """.format(
  3420. **reproducibility_notes
  3421. ),
  3422. )
  3423. add_docstr_all(
  3424. "scatter_reduce_",
  3425. r"""
  3426. scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
Reduces all values from the :attr:`src` tensor to the indices specified in
the :attr:`index` tensor in the :attr:`self` tensor, using the reduction
operation defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
index in :attr:`self` which is specified by its index in :attr:`src` for
``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``. If ``include_self=True``, the values in the :attr:`self`
tensor are included in the reduction.
  3435. :attr:`self`, :attr:`index` and :attr:`src` should all have
  3436. the same number of dimensions. It is also required that
  3437. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  3438. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  3439. Note that ``index`` and ``src`` do not broadcast.
  3440. For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
  3441. output is given as::
  3442. self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
  3443. self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
  3444. self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
  3445. Note:
  3446. {forward_reproducibility_note}
  3447. .. note::
  3448. The backward pass is implemented only for ``src.shape == index.shape``.
  3449. .. warning::
  3450. This function is in beta and may change in the near future.
  3451. Args:
  3452. dim (int): the axis along which to index
  3453. index (LongTensor): the indices of elements to scatter and reduce.
  3454. src (Tensor): the source elements to scatter and reduce
  3455. reduce (str): the reduction operation to apply for non-unique indices
  3456. (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
  3457. include_self (bool): whether elements from the :attr:`self` tensor are
  3458. included in the reduction
  3459. Example::
  3460. >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
  3461. >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
  3462. >>> input = torch.tensor([1., 2., 3., 4.])
  3463. >>> input.scatter_reduce(0, index, src, reduce="sum")
  3464. tensor([5., 14., 8., 4.])
  3465. >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
  3466. tensor([4., 12., 5., 4.])
  3467. >>> input2 = torch.tensor([5., 4., 3., 2.])
  3468. >>> input2.scatter_reduce(0, index, src, reduce="amax")
  3469. tensor([5., 6., 5., 2.])
  3470. >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
  3471. tensor([3., 6., 5., 2.])
  3472. """.format(
  3473. **reproducibility_notes
  3474. ),
  3475. )
  3476. add_docstr_all(
  3477. "select",
  3478. r"""
  3479. select(dim, index) -> Tensor
  3480. See :func:`torch.select`
  3481. """,
  3482. )
  3483. add_docstr_all(
  3484. "select_scatter",
  3485. r"""
  3486. select_scatter(src, dim, index) -> Tensor
  3487. See :func:`torch.select_scatter`
  3488. """,
  3489. )
  3490. add_docstr_all(
  3491. "slice_scatter",
  3492. r"""
  3493. slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
  3494. See :func:`torch.slice_scatter`
  3495. """,
  3496. )
  3497. add_docstr_all(
  3498. "set_",
  3499. r"""
  3500. set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
  3501. Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
  3502. :attr:`self` tensor will share the same storage and have the same size and
  3503. strides as :attr:`source`. Changes to elements in one tensor will be reflected
  3504. in the other.
  3505. If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
  3506. storage, offset, size, and stride.
  3507. Args:
  3508. source (Tensor or Storage): the tensor or storage to use
  3509. storage_offset (int, optional): the offset in the storage
  3510. size (torch.Size, optional): the desired size. Defaults to the size of the source.
  3511. stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
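Example (an illustrative sketch; after the call both tensors share storage)::
>>> a = torch.zeros(4)
>>> b = torch.empty(0)
>>> b.set_(a)  # b now shares a's storage, size, and strides
tensor([0., 0., 0., 0.])
>>> b[0] = 1.
>>> a
tensor([1., 0., 0., 0.])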
  3512. """,
  3513. )
  3514. add_docstr_all(
  3515. "sigmoid",
  3516. r"""
  3517. sigmoid() -> Tensor
  3518. See :func:`torch.sigmoid`
  3519. """,
  3520. )
  3521. add_docstr_all(
  3522. "sigmoid_",
  3523. r"""
  3524. sigmoid_() -> Tensor
  3525. In-place version of :meth:`~Tensor.sigmoid`
  3526. """,
  3527. )
  3528. add_docstr_all(
  3529. "logit",
  3530. r"""
  3531. logit() -> Tensor
  3532. See :func:`torch.logit`
  3533. """,
  3534. )
  3535. add_docstr_all(
  3536. "logit_",
  3537. r"""
  3538. logit_() -> Tensor
  3539. In-place version of :meth:`~Tensor.logit`
  3540. """,
  3541. )
  3542. add_docstr_all(
  3543. "sign",
  3544. r"""
  3545. sign() -> Tensor
  3546. See :func:`torch.sign`
  3547. """,
  3548. )
  3549. add_docstr_all(
  3550. "sign_",
  3551. r"""
  3552. sign_() -> Tensor
  3553. In-place version of :meth:`~Tensor.sign`
  3554. """,
  3555. )
  3556. add_docstr_all(
  3557. "signbit",
  3558. r"""
  3559. signbit() -> Tensor
  3560. See :func:`torch.signbit`
  3561. """,
  3562. )
  3563. add_docstr_all(
  3564. "sgn",
  3565. r"""
  3566. sgn() -> Tensor
  3567. See :func:`torch.sgn`
  3568. """,
  3569. )
  3570. add_docstr_all(
  3571. "sgn_",
  3572. r"""
  3573. sgn_() -> Tensor
  3574. In-place version of :meth:`~Tensor.sgn`
  3575. """,
  3576. )
  3577. add_docstr_all(
  3578. "sin",
  3579. r"""
  3580. sin() -> Tensor
  3581. See :func:`torch.sin`
  3582. """,
  3583. )
  3584. add_docstr_all(
  3585. "sin_",
  3586. r"""
  3587. sin_() -> Tensor
  3588. In-place version of :meth:`~Tensor.sin`
  3589. """,
  3590. )
  3591. add_docstr_all(
  3592. "sinc",
  3593. r"""
  3594. sinc() -> Tensor
  3595. See :func:`torch.sinc`
  3596. """,
  3597. )
  3598. add_docstr_all(
  3599. "sinc_",
  3600. r"""
  3601. sinc_() -> Tensor
  3602. In-place version of :meth:`~Tensor.sinc`
  3603. """,
  3604. )
  3605. add_docstr_all(
  3606. "sinh",
  3607. r"""
  3608. sinh() -> Tensor
  3609. See :func:`torch.sinh`
  3610. """,
  3611. )
  3612. add_docstr_all(
  3613. "sinh_",
  3614. r"""
  3615. sinh_() -> Tensor
  3616. In-place version of :meth:`~Tensor.sinh`
  3617. """,
  3618. )
  3619. add_docstr_all(
  3620. "size",
  3621. r"""
  3622. size(dim=None) -> torch.Size or int
  3623. Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
  3624. the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
  3625. If ``dim`` is specified, returns an int holding the size of that dimension.
  3626. Args:
  3627. dim (int, optional): The dimension for which to retrieve the size.
  3628. Example::
  3629. >>> t = torch.empty(3, 4, 5)
  3630. >>> t.size()
  3631. torch.Size([3, 4, 5])
  3632. >>> t.size(dim=1)
  3633. 4
  3634. """,
  3635. )
  3636. add_docstr_all(
  3637. "sort",
  3638. r"""
  3639. sort(dim=-1, descending=False) -> (Tensor, LongTensor)
  3640. See :func:`torch.sort`
  3641. """,
  3642. )
  3643. add_docstr_all(
  3644. "msort",
  3645. r"""
  3646. msort() -> Tensor
  3647. See :func:`torch.msort`
  3648. """,
  3649. )
  3650. add_docstr_all(
  3651. "argsort",
  3652. r"""
  3653. argsort(dim=-1, descending=False) -> LongTensor
  3654. See :func:`torch.argsort`
  3655. """,
  3656. )
  3657. add_docstr_all(
  3658. "sparse_dim",
  3659. r"""
  3660. sparse_dim() -> int
  3661. Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
  3662. .. note::
  3663. Returns ``0`` if :attr:`self` is not a sparse tensor.
  3664. See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
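Example (an illustrative sketch)::
>>> s = torch.sparse_coo_tensor(
...     torch.tensor([[0, 2]]), torch.tensor([1., 2.]), size=(3,))
>>> s.sparse_dim()
1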
  3665. """,
  3666. )
  3667. add_docstr_all(
  3668. "sparse_resize_",
  3669. r"""
  3670. sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
  3671. Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
  3672. size and the number of sparse and dense dimensions.
  3673. .. note::
If the number of specified elements in :attr:`self` is zero, then
:attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can take any
values satisfying ``len(size) == sparse_dim + dense_dim``.
  3678. If :attr:`self` specifies one or more elements, however, then each
  3679. dimension in :attr:`size` must not be smaller than the corresponding
  3680. dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
  3681. of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
  3682. equal the number of dense dimensions in :attr:`self`.
  3683. .. warning::
  3684. Throws an error if :attr:`self` is not a sparse tensor.
  3685. Args:
size (torch.Size): the desired size. If :attr:`self` is a non-empty
  3687. sparse tensor, the desired size cannot be smaller than the
  3688. original size.
  3689. sparse_dim (int): the number of sparse dimensions
  3690. dense_dim (int): the number of dense dimensions
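Example (an illustrative sketch, starting from a sparse tensor with zero
specified elements)::
>>> s = torch.zeros(2, 2).to_sparse()
>>> s.sparse_resize_((4, 4), 2, 0).shape
torch.Size([4, 4])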
  3691. """,
  3692. )
  3693. add_docstr_all(
  3694. "sparse_resize_and_clear_",
  3695. r"""
  3696. sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
  3697. Removes all specified elements from a :ref:`sparse tensor
  3698. <sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
  3699. size and the number of sparse and dense dimensions.
.. warning::
  3701. Throws an error if :attr:`self` is not a sparse tensor.
  3702. Args:
  3703. size (torch.Size): the desired size.
  3704. sparse_dim (int): the number of sparse dimensions
  3705. dense_dim (int): the number of dense dimensions
  3706. """,
  3707. )
  3708. add_docstr_all(
  3709. "sqrt",
  3710. r"""
  3711. sqrt() -> Tensor
  3712. See :func:`torch.sqrt`
  3713. """,
  3714. )
  3715. add_docstr_all(
  3716. "sqrt_",
  3717. r"""
  3718. sqrt_() -> Tensor
  3719. In-place version of :meth:`~Tensor.sqrt`
  3720. """,
  3721. )
  3722. add_docstr_all(
  3723. "square",
  3724. r"""
  3725. square() -> Tensor
  3726. See :func:`torch.square`
  3727. """,
  3728. )
  3729. add_docstr_all(
  3730. "square_",
  3731. r"""
  3732. square_() -> Tensor
  3733. In-place version of :meth:`~Tensor.square`
  3734. """,
  3735. )
  3736. add_docstr_all(
  3737. "squeeze",
  3738. r"""
  3739. squeeze(dim=None) -> Tensor
  3740. See :func:`torch.squeeze`
  3741. """,
  3742. )
  3743. add_docstr_all(
  3744. "squeeze_",
  3745. r"""
  3746. squeeze_(dim=None) -> Tensor
  3747. In-place version of :meth:`~Tensor.squeeze`
  3748. """,
  3749. )
  3750. add_docstr_all(
  3751. "std",
  3752. r"""
  3753. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  3754. See :func:`torch.std`
  3755. """,
  3756. )
  3757. add_docstr_all(
  3758. "storage_offset",
  3759. r"""
  3760. storage_offset() -> int
  3761. Returns :attr:`self` tensor's offset in the underlying storage in terms of
  3762. number of storage elements (not bytes).
  3763. Example::
  3764. >>> x = torch.tensor([1, 2, 3, 4, 5])
  3765. >>> x.storage_offset()
  3766. 0
  3767. >>> x[3:].storage_offset()
  3768. 3
  3769. """,
  3770. )
  3771. add_docstr_all(
  3772. "untyped_storage",
  3773. r"""
  3774. untyped_storage() -> torch.UntypedStorage
  3775. Returns the underlying :class:`UntypedStorage`.
  3776. """,
  3777. )
  3778. add_docstr_all(
  3779. "stride",
  3780. r"""
stride(dim=None) -> tuple or int
  3782. Returns the stride of :attr:`self` tensor.
  3783. Stride is the jump necessary to go from one element to the next one in the
  3784. specified dimension :attr:`dim`. A tuple of all strides is returned when no
  3785. argument is passed in. Otherwise, an integer value is returned as the stride in
  3786. the particular dimension :attr:`dim`.
  3787. Args:
  3788. dim (int, optional): the desired dimension in which stride is required
  3789. Example::
  3790. >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
  3791. >>> x.stride()
  3792. (5, 1)
  3793. >>> x.stride(0)
  3794. 5
  3795. >>> x.stride(-1)
  3796. 1
  3797. """,
  3798. )
  3799. add_docstr_all(
  3800. "sub",
  3801. r"""
  3802. sub(other, *, alpha=1) -> Tensor
  3803. See :func:`torch.sub`.
  3804. """,
  3805. )
  3806. add_docstr_all(
  3807. "sub_",
  3808. r"""
  3809. sub_(other, *, alpha=1) -> Tensor
  3810. In-place version of :meth:`~Tensor.sub`
  3811. """,
  3812. )
  3813. add_docstr_all(
  3814. "subtract",
  3815. r"""
  3816. subtract(other, *, alpha=1) -> Tensor
  3817. See :func:`torch.subtract`.
  3818. """,
  3819. )
  3820. add_docstr_all(
  3821. "subtract_",
  3822. r"""
  3823. subtract_(other, *, alpha=1) -> Tensor
  3824. In-place version of :meth:`~Tensor.subtract`.
  3825. """,
  3826. )
  3827. add_docstr_all(
  3828. "sum",
  3829. r"""
  3830. sum(dim=None, keepdim=False, dtype=None) -> Tensor
  3831. See :func:`torch.sum`
  3832. """,
  3833. )
  3834. add_docstr_all(
  3835. "nansum",
  3836. r"""
  3837. nansum(dim=None, keepdim=False, dtype=None) -> Tensor
  3838. See :func:`torch.nansum`
  3839. """,
  3840. )
  3841. add_docstr_all(
  3842. "svd",
  3843. r"""
  3844. svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
  3845. See :func:`torch.svd`
  3846. """,
  3847. )
  3848. add_docstr_all(
  3849. "swapdims",
  3850. r"""
  3851. swapdims(dim0, dim1) -> Tensor
  3852. See :func:`torch.swapdims`
  3853. """,
  3854. )
  3855. add_docstr_all(
  3856. "swapdims_",
  3857. r"""
  3858. swapdims_(dim0, dim1) -> Tensor
  3859. In-place version of :meth:`~Tensor.swapdims`
  3860. """,
  3861. )
  3862. add_docstr_all(
  3863. "swapaxes",
  3864. r"""
  3865. swapaxes(axis0, axis1) -> Tensor
  3866. See :func:`torch.swapaxes`
  3867. """,
  3868. )
  3869. add_docstr_all(
  3870. "swapaxes_",
  3871. r"""
  3872. swapaxes_(axis0, axis1) -> Tensor
  3873. In-place version of :meth:`~Tensor.swapaxes`
  3874. """,
  3875. )
  3876. add_docstr_all(
  3877. "t",
  3878. r"""
  3879. t() -> Tensor
  3880. See :func:`torch.t`
  3881. """,
  3882. )
  3883. add_docstr_all(
  3884. "t_",
  3885. r"""
  3886. t_() -> Tensor
  3887. In-place version of :meth:`~Tensor.t`
  3888. """,
  3889. )
  3890. add_docstr_all(
  3891. "tile",
  3892. r"""
  3893. tile(*reps) -> Tensor
  3894. See :func:`torch.tile`
  3895. """,
  3896. )
  3897. add_docstr_all(
  3898. "to",
  3899. r"""
  3900. to(*args, **kwargs) -> Tensor
  3901. Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
  3902. inferred from the arguments of ``self.to(*args, **kwargs)``.
  3903. .. note::
  3904. If the ``self`` Tensor already
  3905. has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
  3906. Otherwise, the returned tensor is a copy of ``self`` with the desired
  3907. :class:`torch.dtype` and :class:`torch.device`.
  3908. Here are the ways to call ``to``:
  3909. .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  3910. :noindex:
  3911. Returns a Tensor with the specified :attr:`dtype`
  3912. Args:
  3913. {memory_format}
  3914. .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  3915. :noindex:
  3916. Returns a Tensor with the specified :attr:`device` and (optional)
  3917. :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
When :attr:`non_blocking` is set, the conversion is performed asynchronously
with respect to the host if possible, e.g., when converting a CPU Tensor with
pinned memory to a CUDA Tensor.
  3921. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  3922. already matches the desired conversion.
  3923. Args:
  3924. {memory_format}
  3925. .. method:: to(other, non_blocking=False, copy=False) -> Tensor
  3926. :noindex:
Returns a Tensor with the same :class:`torch.dtype` and :class:`torch.device` as
the Tensor :attr:`other`. When :attr:`non_blocking` is set, the conversion is
performed asynchronously with respect to the host if possible, e.g., when
converting a CPU Tensor with pinned memory to a CUDA Tensor.
  3931. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  3932. already matches the desired conversion.
  3933. Example::
  3934. >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
  3935. >>> tensor.to(torch.float64)
  3936. tensor([[-0.5044, 0.0005],
  3937. [ 0.3310, -0.0584]], dtype=torch.float64)
  3938. >>> cuda0 = torch.device('cuda:0')
  3939. >>> tensor.to(cuda0)
  3940. tensor([[-0.5044, 0.0005],
  3941. [ 0.3310, -0.0584]], device='cuda:0')
  3942. >>> tensor.to(cuda0, dtype=torch.float64)
  3943. tensor([[-0.5044, 0.0005],
  3944. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  3945. >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
  3946. >>> tensor.to(other, non_blocking=True)
  3947. tensor([[-0.5044, 0.0005],
  3948. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  3949. """.format(
  3950. **common_args
  3951. ),
  3952. )
  3953. add_docstr_all(
  3954. "byte",
  3955. r"""
  3956. byte(memory_format=torch.preserve_format) -> Tensor
  3957. ``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
  3958. Args:
  3959. {memory_format}
  3960. """.format(
  3961. **common_args
  3962. ),
  3963. )
  3964. add_docstr_all(
  3965. "bool",
  3966. r"""
  3967. bool(memory_format=torch.preserve_format) -> Tensor
  3968. ``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
  3969. Args:
  3970. {memory_format}
  3971. """.format(
  3972. **common_args
  3973. ),
  3974. )
  3975. add_docstr_all(
  3976. "char",
  3977. r"""
  3978. char(memory_format=torch.preserve_format) -> Tensor
  3979. ``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
  3980. Args:
  3981. {memory_format}
  3982. """.format(
  3983. **common_args
  3984. ),
  3985. )
  3986. add_docstr_all(
  3987. "bfloat16",
  3988. r"""
  3989. bfloat16(memory_format=torch.preserve_format) -> Tensor
  3990. ``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
  3991. Args:
  3992. {memory_format}
  3993. """.format(
  3994. **common_args
  3995. ),
  3996. )
  3997. add_docstr_all(
  3998. "double",
  3999. r"""
  4000. double(memory_format=torch.preserve_format) -> Tensor
  4001. ``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
  4002. Args:
  4003. {memory_format}
  4004. """.format(
  4005. **common_args
  4006. ),
  4007. )
  4008. add_docstr_all(
  4009. "float",
  4010. r"""
  4011. float(memory_format=torch.preserve_format) -> Tensor
  4012. ``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
  4013. Args:
  4014. {memory_format}
  4015. """.format(
  4016. **common_args
  4017. ),
  4018. )
  4019. add_docstr_all(
  4020. "cdouble",
  4021. r"""
  4022. cdouble(memory_format=torch.preserve_format) -> Tensor
  4023. ``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.
  4024. Args:
  4025. {memory_format}
  4026. """.format(
  4027. **common_args
  4028. ),
  4029. )
  4030. add_docstr_all(
  4031. "cfloat",
  4032. r"""
  4033. cfloat(memory_format=torch.preserve_format) -> Tensor
  4034. ``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.
  4035. Args:
  4036. {memory_format}
  4037. """.format(
  4038. **common_args
  4039. ),
  4040. )
  4041. add_docstr_all(
  4042. "chalf",
  4043. r"""
  4044. chalf(memory_format=torch.preserve_format) -> Tensor
  4045. ``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
  4046. Args:
  4047. {memory_format}
  4048. """.format(
  4049. **common_args
  4050. ),
  4051. )
  4052. add_docstr_all(
  4053. "half",
  4054. r"""
  4055. half(memory_format=torch.preserve_format) -> Tensor
  4056. ``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
  4057. Args:
  4058. {memory_format}
  4059. """.format(
  4060. **common_args
  4061. ),
  4062. )
  4063. add_docstr_all(
  4064. "int",
  4065. r"""
  4066. int(memory_format=torch.preserve_format) -> Tensor
  4067. ``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
  4068. Args:
  4069. {memory_format}
  4070. """.format(
  4071. **common_args
  4072. ),
  4073. )
  4074. add_docstr_all(
  4075. "int_repr",
  4076. r"""
  4077. int_repr() -> Tensor
Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor of the corresponding integer dtype
(e.g. ``torch.uint8`` for a ``torch.quint8`` tensor) that stores the
underlying integer values of the given Tensor.
  4081. """,
  4082. )
  4083. add_docstr_all(
  4084. "long",
  4085. r"""
  4086. long(memory_format=torch.preserve_format) -> Tensor
  4087. ``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
  4088. Args:
  4089. {memory_format}
  4090. """.format(
  4091. **common_args
  4092. ),
  4093. )
  4094. add_docstr_all(
  4095. "short",
  4096. r"""
  4097. short(memory_format=torch.preserve_format) -> Tensor
  4098. ``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
  4099. Args:
  4100. {memory_format}
  4101. """.format(
  4102. **common_args
  4103. ),
  4104. )
  4105. add_docstr_all(
  4106. "take",
  4107. r"""
  4108. take(indices) -> Tensor
  4109. See :func:`torch.take`
  4110. """,
  4111. )
  4112. add_docstr_all(
  4113. "take_along_dim",
  4114. r"""
  4115. take_along_dim(indices, dim) -> Tensor
  4116. See :func:`torch.take_along_dim`
  4117. """,
  4118. )
  4119. add_docstr_all(
  4120. "tan",
  4121. r"""
  4122. tan() -> Tensor
  4123. See :func:`torch.tan`
  4124. """,
  4125. )
  4126. add_docstr_all(
  4127. "tan_",
  4128. r"""
  4129. tan_() -> Tensor
  4130. In-place version of :meth:`~Tensor.tan`
  4131. """,
  4132. )
  4133. add_docstr_all(
  4134. "tanh",
  4135. r"""
  4136. tanh() -> Tensor
  4137. See :func:`torch.tanh`
  4138. """,
  4139. )
  4140. add_docstr_all(
  4141. "softmax",
  4142. r"""
  4143. softmax(dim) -> Tensor
  4144. Alias for :func:`torch.nn.functional.softmax`.
  4145. """,
  4146. )
  4147. add_docstr_all(
  4148. "tanh_",
  4149. r"""
  4150. tanh_() -> Tensor
  4151. In-place version of :meth:`~Tensor.tanh`
  4152. """,
  4153. )
  4154. add_docstr_all(
  4155. "tolist",
  4156. r"""
  4157. tolist() -> list or number
  4158. Returns the tensor as a (nested) list. For scalars, a standard
  4159. Python number is returned, just like with :meth:`~Tensor.item`.
  4160. Tensors are automatically moved to the CPU first if necessary.
  4161. This operation is not differentiable.
  4162. Examples::
  4163. >>> a = torch.randn(2, 2)
  4164. >>> a.tolist()
  4165. [[0.012766935862600803, 0.5415473580360413],
  4166. [-0.08909505605697632, 0.7729271650314331]]
  4167. >>> a[0,0].tolist()
  4168. 0.012766935862600803
  4169. """,
  4170. )
  4171. add_docstr_all(
  4172. "topk",
  4173. r"""
  4174. topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
  4175. See :func:`torch.topk`
  4176. """,
  4177. )
  4178. add_docstr_all(
  4179. "to_dense",
  4180. r"""
  4181. to_dense() -> Tensor
  4182. Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
  4183. Example::
  4184. >>> s = torch.sparse_coo_tensor(
  4185. ... torch.tensor([[1, 1],
  4186. ... [0, 2]]),
  4187. ... torch.tensor([9, 10]),
  4188. ... size=(3, 3))
  4189. >>> s.to_dense()
  4190. tensor([[ 0, 0, 0],
  4191. [ 9, 0, 10],
  4192. [ 0, 0, 0]])
  4193. """,
  4194. )
  4195. add_docstr_all(
  4196. "to_sparse",
  4197. r"""
  4198. to_sparse(sparseDims) -> Tensor
  4199. Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
  4200. :ref:`coordinate format <sparse-coo-docs>`.
  4201. Args:
  4202. sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
  4203. Example::
  4204. >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
  4205. >>> d
  4206. tensor([[ 0, 0, 0],
  4207. [ 9, 0, 10],
  4208. [ 0, 0, 0]])
  4209. >>> d.to_sparse()
  4210. tensor(indices=tensor([[1, 1],
  4211. [0, 2]]),
  4212. values=tensor([ 9, 10]),
  4213. size=(3, 3), nnz=2, layout=torch.sparse_coo)
  4214. >>> d.to_sparse(1)
  4215. tensor(indices=tensor([[1]]),
  4216. values=tensor([[ 9, 0, 10]]),
  4217. size=(3, 3), nnz=1, layout=torch.sparse_coo)
  4218. .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
  4219. :noindex:
Returns a sparse tensor with the specified layout and blocksize. If
:attr:`self` is strided, the number of dense dimensions can be
specified, and a hybrid sparse tensor will be created, with
`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
dimensions.
.. note:: If the :attr:`self` layout and blocksize parameters match
the specified layout and blocksize, :attr:`self` is
returned. Otherwise, a sparse tensor copy of
:attr:`self` is returned.
  4229. Args:
  4230. layout (:class:`torch.layout`, optional): The desired sparse
  4231. layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
  4232. ``torch.sparse_csc``, ``torch.sparse_bsr``, or
  4233. ``torch.sparse_bsc``. Default: if ``None``,
  4234. ``torch.sparse_coo``.
  4235. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  4236. of the resulting BSR or BSC tensor. For other layouts,
specifying a block size that is not ``None`` will result in a
``RuntimeError``. A block size must be a tuple of length
  4239. two such that its items evenly divide the two sparse dimensions.
  4240. dense_dim (int, optional): Number of dense dimensions of the
  4241. resulting CSR, CSC, BSR or BSC tensor. This argument should be
  4242. used only if :attr:`self` is a strided tensor, and must be a
  4243. value between 0 and dimension of :attr:`self` tensor minus two.
  4244. Example::
  4245. >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
  4246. >>> x.to_sparse(layout=torch.sparse_coo)
  4247. tensor(indices=tensor([[0, 2, 2],
  4248. [0, 0, 1]]),
  4249. values=tensor([1, 2, 3]),
  4250. size=(3, 2), nnz=3, layout=torch.sparse_coo)
  4251. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
  4252. tensor(crow_indices=tensor([0, 1, 1, 2]),
  4253. col_indices=tensor([0, 0]),
  4254. values=tensor([[[1, 0]],
  4255. [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
  4256. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
  4257. RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
  4258. >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
  4259. RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
  4260. >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
  4261. >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
  4262. tensor(crow_indices=tensor([0, 1, 1, 3]),
  4263. col_indices=tensor([0, 0, 1]),
  4264. values=tensor([[1],
  4265. [2],
  4266. [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
  4267. """,
  4268. )
  4269. add_docstr_all(
  4270. "to_sparse_csr",
  4271. r"""
  4272. to_sparse_csr(dense_dim=None) -> Tensor
  4273. Convert a tensor to compressed row storage format (CSR). Except for
strided tensors, this only works with 2D tensors. If :attr:`self` is
strided, then the number of dense dimensions can be specified, and a
hybrid CSR tensor will be created, with `dense_dim` dense dimensions
and `self.dim() - 2 - dense_dim` batch dimensions.
  4278. Args:
  4279. dense_dim (int, optional): Number of dense dimensions of the
  4280. resulting CSR tensor. This argument should be used only if
  4281. :attr:`self` is a strided tensor, and must be a value between 0
  4282. and dimension of :attr:`self` tensor minus two.
  4283. Example::
  4284. >>> dense = torch.randn(5, 5)
  4285. >>> sparse = dense.to_sparse_csr()
  4286. >>> sparse._nnz()
  4287. 25
  4288. >>> dense = torch.zeros(3, 3, 1, 1)
  4289. >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
  4290. >>> dense.to_sparse_csr(dense_dim=2)
  4291. tensor(crow_indices=tensor([0, 1, 2, 3]),
  4292. col_indices=tensor([0, 2, 1]),
  4293. values=tensor([[[1.]],
  4294. [[1.]],
  4295. [[1.]]]), size=(3, 3, 1, 1), nnz=3,
  4296. layout=torch.sparse_csr)
  4297. """,
  4298. )
  4299. add_docstr_all(
  4300. "to_sparse_csc",
  4301. r"""
to_sparse_csc(dense_dim=None) -> Tensor
Convert a tensor to compressed column storage (CSC) format. Except
for strided tensors, this only works with 2D tensors. If :attr:`self`
is strided, then the number of dense dimensions can be specified,
and a hybrid CSC tensor will be created, with `dense_dim` dense
dimensions and `self.dim() - 2 - dense_dim` batch dimensions.
  4308. Args:
  4309. dense_dim (int, optional): Number of dense dimensions of the
  4310. resulting CSC tensor. This argument should be used only if
  4311. :attr:`self` is a strided tensor, and must be a value between 0
  4312. and dimension of :attr:`self` tensor minus two.
  4313. Example::
  4314. >>> dense = torch.randn(5, 5)
  4315. >>> sparse = dense.to_sparse_csc()
  4316. >>> sparse._nnz()
  4317. 25
  4318. >>> dense = torch.zeros(3, 3, 1, 1)
  4319. >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
  4320. >>> dense.to_sparse_csc(dense_dim=2)
  4321. tensor(ccol_indices=tensor([0, 1, 2, 3]),
  4322. row_indices=tensor([0, 2, 1]),
  4323. values=tensor([[[1.]],
  4324. [[1.]],
  4325. [[1.]]]), size=(3, 3, 1, 1), nnz=3,
  4326. layout=torch.sparse_csc)
  4327. """,
  4328. )
  4329. add_docstr_all(
  4330. "to_sparse_bsr",
  4331. r"""
to_sparse_bsr(blocksize, dense_dim=None) -> Tensor
Convert a tensor to a block sparse row (BSR) storage format of the given
blocksize. If :attr:`self` is strided, then the number of dense
dimensions can be specified, and a hybrid BSR tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.
  4338. Args:
  4339. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  4340. of the resulting BSR tensor. A block size must be a tuple of
  4341. length two such that its items evenly divide the two sparse
  4342. dimensions.
  4343. dense_dim (int, optional): Number of dense dimensions of the
  4344. resulting BSR tensor. This argument should be used only if
  4345. :attr:`self` is a strided tensor, and must be a value between 0
  4346. and dimension of :attr:`self` tensor minus two.
  4347. Example::
  4348. >>> dense = torch.randn(10, 10)
  4349. >>> sparse = dense.to_sparse_csr()
  4350. >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
  4351. >>> sparse_bsr.col_indices()
  4352. tensor([0, 1, 0, 1])
  4353. >>> dense = torch.zeros(4, 3, 1)
  4354. >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
  4355. >>> dense.to_sparse_bsr((2, 1), 1)
  4356. tensor(crow_indices=tensor([0, 2, 3]),
  4357. col_indices=tensor([0, 2, 1]),
  4358. values=tensor([[[[1.]],
  4359. [[1.]]],
  4360. [[[1.]],
  4361. [[1.]]],
  4362. [[[1.]],
  4363. [[1.]]]]), size=(4, 3, 1), nnz=3,
  4364. layout=torch.sparse_bsr)
  4365. """,
  4366. )
  4367. add_docstr_all(
  4368. "to_sparse_bsc",
  4369. r"""
to_sparse_bsc(blocksize, dense_dim=None) -> Tensor
Convert a tensor to a block sparse column (BSC) storage format of the
given blocksize. If :attr:`self` is strided, then the number of
dense dimensions can be specified, and a hybrid BSC tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.
  4376. Args:
  4377. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  4378. of the resulting BSC tensor. A block size must be a tuple of
  4379. length two such that its items evenly divide the two sparse
  4380. dimensions.
  4381. dense_dim (int, optional): Number of dense dimensions of the
  4382. resulting BSC tensor. This argument should be used only if
  4383. :attr:`self` is a strided tensor, and must be a value between 0
  4384. and dimension of :attr:`self` tensor minus two.
  4385. Example::
  4386. >>> dense = torch.randn(10, 10)
  4387. >>> sparse = dense.to_sparse_csr()
  4388. >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
  4389. >>> sparse_bsc.row_indices()
  4390. tensor([0, 1, 0, 1])
  4391. >>> dense = torch.zeros(4, 3, 1)
  4392. >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
  4393. >>> dense.to_sparse_bsc((2, 1), 1)
  4394. tensor(ccol_indices=tensor([0, 1, 2, 3]),
  4395. row_indices=tensor([0, 1, 0]),
  4396. values=tensor([[[[1.]],
  4397. [[1.]]],
  4398. [[[1.]],
  4399. [[1.]]],
  4400. [[[1.]],
  4401. [[1.]]]]), size=(4, 3, 1), nnz=3,
  4402. layout=torch.sparse_bsc)
  4403. """,
  4404. )
  4405. add_docstr_all(
  4406. "to_mkldnn",
  4407. r"""
  4408. to_mkldnn() -> Tensor
  4409. Returns a copy of the tensor in ``torch.mkldnn`` layout.
  4410. """,
  4411. )
  4412. add_docstr_all(
  4413. "trace",
  4414. r"""
  4415. trace() -> Tensor
  4416. See :func:`torch.trace`
  4417. """,
  4418. )
  4419. add_docstr_all(
  4420. "transpose",
  4421. r"""
  4422. transpose(dim0, dim1) -> Tensor
  4423. See :func:`torch.transpose`
  4424. """,
  4425. )
  4426. add_docstr_all(
  4427. "transpose_",
  4428. r"""
  4429. transpose_(dim0, dim1) -> Tensor
  4430. In-place version of :meth:`~Tensor.transpose`
  4431. """,
  4432. )
  4433. add_docstr_all(
  4434. "triangular_solve",
  4435. r"""
  4436. triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
  4437. See :func:`torch.triangular_solve`
  4438. """,
  4439. )
  4440. add_docstr_all(
  4441. "tril",
  4442. r"""
  4443. tril(diagonal=0) -> Tensor
  4444. See :func:`torch.tril`
  4445. """,
  4446. )
  4447. add_docstr_all(
  4448. "tril_",
  4449. r"""
  4450. tril_(diagonal=0) -> Tensor
  4451. In-place version of :meth:`~Tensor.tril`
  4452. """,
  4453. )
  4454. add_docstr_all(
  4455. "triu",
  4456. r"""
  4457. triu(diagonal=0) -> Tensor
  4458. See :func:`torch.triu`
  4459. """,
  4460. )
  4461. add_docstr_all(
  4462. "triu_",
  4463. r"""
  4464. triu_(diagonal=0) -> Tensor
  4465. In-place version of :meth:`~Tensor.triu`
  4466. """,
  4467. )
  4468. add_docstr_all(
  4469. "true_divide",
  4470. r"""
  4471. true_divide(value) -> Tensor
  4472. See :func:`torch.true_divide`
  4473. """,
  4474. )
  4475. add_docstr_all(
  4476. "true_divide_",
  4477. r"""
  4478. true_divide_(value) -> Tensor
In-place version of :meth:`~Tensor.true_divide`
  4480. """,
  4481. )
  4482. add_docstr_all(
  4483. "trunc",
  4484. r"""
  4485. trunc() -> Tensor
  4486. See :func:`torch.trunc`
  4487. """,
  4488. )
  4489. add_docstr_all(
  4490. "fix",
  4491. r"""
  4492. fix() -> Tensor
  4493. See :func:`torch.fix`.
  4494. """,
  4495. )
  4496. add_docstr_all(
  4497. "trunc_",
  4498. r"""
  4499. trunc_() -> Tensor
  4500. In-place version of :meth:`~Tensor.trunc`
  4501. """,
  4502. )
  4503. add_docstr_all(
  4504. "fix_",
  4505. r"""
  4506. fix_() -> Tensor
  4507. In-place version of :meth:`~Tensor.fix`
  4508. """,
  4509. )
  4510. add_docstr_all(
  4511. "type",
  4512. r"""
  4513. type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
  4514. Returns the type if `dtype` is not provided, else casts this object to
  4515. the specified type.
  4516. If this is already of the correct type, no copy is performed and the
  4517. original object is returned.
  4518. Args:
  4519. dtype (dtype or string): The desired type
  4520. non_blocking (bool): If ``True``, and the source is in pinned memory
  4521. and destination is on the GPU or vice versa, the copy is performed
  4522. asynchronously with respect to the host. Otherwise, the argument
  4523. has no effect.
  4524. **kwargs: For compatibility, may contain the key ``async`` in place of
  4525. the ``non_blocking`` argument. The ``async`` arg is deprecated.
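Example (an illustrative sketch)::
>>> t = torch.ones(2)
>>> t.type()
'torch.FloatTensor'
>>> t.type(torch.int32)
tensor([1, 1], dtype=torch.int32)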
  4526. """,
  4527. )
  4528. add_docstr_all(
  4529. "type_as",
  4530. r"""
  4531. type_as(tensor) -> Tensor
  4532. Returns this tensor cast to the type of the given tensor.
  4533. This is a no-op if the tensor is already of the correct type. This is
  4534. equivalent to ``self.type(tensor.type())``
  4535. Args:
  4536. tensor (Tensor): the tensor which has the desired type
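Example (an illustrative sketch)::
>>> a = torch.randn(2)  # dtype is torch.float32
>>> b = torch.zeros(2, dtype=torch.float64)
>>> a.type_as(b).dtype
torch.float64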
  4537. """,
  4538. )
  4539. add_docstr_all(
  4540. "unfold",
  4541. r"""
  4542. unfold(dimension, size, step) -> Tensor
  4543. Returns a view of the original tensor which contains all slices of size :attr:`size` from
  4544. :attr:`self` tensor in the dimension :attr:`dimension`.
  4545. Step between two slices is given by :attr:`step`.
  4546. If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
  4547. dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) // step + 1` (using integer division).
  4549. An additional dimension of size :attr:`size` is appended in the returned tensor.
  4550. Args:
  4551. dimension (int): dimension in which unfolding happens
  4552. size (int): the size of each slice that is unfolded
  4553. step (int): the step between each slice
  4554. Example::
  4555. >>> x = torch.arange(1., 8)
  4556. >>> x
  4557. tensor([ 1., 2., 3., 4., 5., 6., 7.])
  4558. >>> x.unfold(0, 2, 1)
  4559. tensor([[ 1., 2.],
  4560. [ 2., 3.],
  4561. [ 3., 4.],
  4562. [ 4., 5.],
  4563. [ 5., 6.],
  4564. [ 6., 7.]])
  4565. >>> x.unfold(0, 2, 2)
  4566. tensor([[ 1., 2.],
  4567. [ 3., 4.],
  4568. [ 5., 6.]])
  4569. """,
  4570. )
  4571. add_docstr_all(
  4572. "uniform_",
  4573. r"""
  4574. uniform_(from=0, to=1) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution on the interval ``[from, to)``:
  4577. .. math::
  4578. P(x) = \dfrac{1}{\text{to} - \text{from}}
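Example (an illustrative sketch; the sampled values are random, so only the bounds are checked)::
>>> t = torch.empty(3).uniform_(0, 1)
>>> ((t >= 0) & (t < 1)).all()
tensor(True)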
  4579. """,
  4580. )
  4581. add_docstr_all(
  4582. "unsqueeze",
  4583. r"""
  4584. unsqueeze(dim) -> Tensor
  4585. See :func:`torch.unsqueeze`
  4586. """,
  4587. )
  4588. add_docstr_all(
  4589. "unsqueeze_",
  4590. r"""
  4591. unsqueeze_(dim) -> Tensor
  4592. In-place version of :meth:`~Tensor.unsqueeze`
  4593. """,
  4594. )
  4595. add_docstr_all(
  4596. "var",
  4597. r"""
  4598. var(dim=None, *, correction=1, keepdim=False) -> Tensor
  4599. See :func:`torch.var`
  4600. """,
  4601. )
  4602. add_docstr_all(
  4603. "vdot",
  4604. r"""
  4605. vdot(other) -> Tensor
  4606. See :func:`torch.vdot`
  4607. """,
  4608. )
  4609. add_docstr_all(
  4610. "view",
  4611. r"""
  4612. view(*shape) -> Tensor
  4613. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  4614. different :attr:`shape`.
  4615. The returned tensor shares the same data and must have the same number
  4616. of elements, but may have a different size. For a tensor to be viewed, the new
  4617. view size must be compatible with its original size and stride, i.e., each new
  4618. view dimension must either be a subspace of an original dimension, or only span
  4619. across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
  4620. contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
  4621. .. math::
  4622. \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
  4623. Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
  4624. without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
  4625. :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
  4626. returns a view if the shapes are compatible, and copies (equivalent to calling
  4627. :meth:`contiguous`) otherwise.
  4628. Args:
  4629. shape (torch.Size or int...): the desired size
  4630. Example::
  4631. >>> x = torch.randn(4, 4)
  4632. >>> x.size()
  4633. torch.Size([4, 4])
  4634. >>> y = x.view(16)
  4635. >>> y.size()
  4636. torch.Size([16])
  4637. >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
  4638. >>> z.size()
  4639. torch.Size([2, 8])
  4640. >>> a = torch.randn(1, 2, 3, 4)
  4641. >>> a.size()
  4642. torch.Size([1, 2, 3, 4])
  4643. >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
  4644. >>> b.size()
  4645. torch.Size([1, 3, 2, 4])
  4646. >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
  4647. >>> c.size()
  4648. torch.Size([1, 3, 2, 4])
  4649. >>> torch.equal(b, c)
  4650. False
  4651. .. method:: view(dtype) -> Tensor
  4652. :noindex:
  4653. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  4654. different :attr:`dtype`.
  4655. If the element size of :attr:`dtype` is different than that of ``self.dtype``,
  4656. then the size of the last dimension of the output will be scaled
  4657. proportionally. For instance, if :attr:`dtype` element size is twice that of
  4658. ``self.dtype``, then each pair of elements in the last dimension of
  4659. :attr:`self` will be combined, and the size of the last dimension of the output
  4660. will be half that of :attr:`self`. If :attr:`dtype` element size is half that
  4661. of ``self.dtype``, then each element in the last dimension of :attr:`self` will
  4662. be split in two, and the size of the last dimension of the output will be
  4663. double that of :attr:`self`. For this to be possible, the following conditions
  4664. must be true:
  4665. * ``self.dim()`` must be greater than 0.
  4666. * ``self.stride(-1)`` must be 1.
  4667. Additionally, if the element size of :attr:`dtype` is greater than that of
  4668. ``self.dtype``, the following conditions must be true as well:
  4669. * ``self.size(-1)`` must be divisible by the ratio between the element
  4670. sizes of the dtypes.
  4671. * ``self.storage_offset()`` must be divisible by the ratio between the
  4672. element sizes of the dtypes.
  4673. * The strides of all dimensions, except the last dimension, must be
  4674. divisible by the ratio between the element sizes of the dtypes.
  4675. If any of the above conditions are not met, an error is thrown.
  4676. .. warning::
This overload is not supported by TorchScript, and using it in a TorchScript
  4678. program will cause undefined behavior.
  4679. Args:
  4680. dtype (:class:`torch.dtype`): the desired dtype
  4681. Example::
  4682. >>> x = torch.randn(4, 4)
  4683. >>> x
  4684. tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
  4685. [-0.1520, 0.7472, 0.5617, -0.8649],
  4686. [-2.4724, -0.0334, -0.2976, -0.8499],
  4687. [-0.2109, 1.9913, -0.9607, -0.6123]])
  4688. >>> x.dtype
  4689. torch.float32
  4690. >>> y = x.view(torch.int32)
  4691. >>> y
  4692. tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
  4693. [-1105482831, 1061112040, 1057999968, -1084397505],
  4694. [-1071760287, -1123489973, -1097310419, -1084649136],
  4695. [-1101533110, 1073668768, -1082790149, -1088634448]],
  4696. dtype=torch.int32)
  4697. >>> y[0, 0] = 1000000000
  4698. >>> x
  4699. tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
  4700. [-0.1520, 0.7472, 0.5617, -0.8649],
  4701. [-2.4724, -0.0334, -0.2976, -0.8499],
  4702. [-0.2109, 1.9913, -0.9607, -0.6123]])
  4703. >>> x.view(torch.cfloat)
  4704. tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
  4705. [-0.1520+0.7472j, 0.5617-0.8649j],
  4706. [-2.4724-0.0334j, -0.2976-0.8499j],
  4707. [-0.2109+1.9913j, -0.9607-0.6123j]])
  4708. >>> x.view(torch.cfloat).size()
  4709. torch.Size([4, 2])
  4710. >>> x.view(torch.uint8)
  4711. tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
  4712. 8, 191],
  4713. [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
  4714. 93, 191],
  4715. [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
  4716. 89, 191],
  4717. [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
  4718. 28, 191]], dtype=torch.uint8)
  4719. >>> x.view(torch.uint8).size()
  4720. torch.Size([4, 16])
  4721. """,
  4722. )
  4723. add_docstr_all(
  4724. "view_as",
  4725. r"""
  4726. view_as(other) -> Tensor
  4727. View this tensor as the same size as :attr:`other`.
  4728. ``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
  4729. Please see :meth:`~Tensor.view` for more information about ``view``.
  4730. Args:
  4731. other (:class:`torch.Tensor`): The result tensor has the same size
  4732. as :attr:`other`.
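
A minimal illustrative sketch of the equivalence::

    >>> x = torch.arange(6)
    >>> y = torch.empty(2, 3)
    >>> x.view_as(y).size()
    torch.Size([2, 3])
    >>> torch.equal(x.view_as(y), x.view(y.size()))
    True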
  4733. """,
  4734. )
  4735. add_docstr_all(
  4736. "expand",
  4737. r"""
  4738. expand(*sizes) -> Tensor
  4739. Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
  4740. to a larger size.
  4741. Passing -1 as the size for a dimension means not changing the size of
  4742. that dimension.
  4743. Tensor can be also expanded to a larger number of dimensions, and the
  4744. new ones will be appended at the front. For the new dimensions, the
  4745. size cannot be set to -1.
  4746. Expanding a tensor does not allocate new memory, but only creates a
  4747. new view on the existing tensor where a dimension of size one is
  4748. expanded to a larger size by setting the ``stride`` to 0. Any dimension
  4749. of size 1 can be expanded to an arbitrary value without allocating new
  4750. memory.
  4751. Args:
  4752. *sizes (torch.Size or int...): the desired expanded size
  4753. .. warning::
  4754. More than one element of an expanded tensor may refer to a single
  4755. memory location. As a result, in-place operations (especially ones that
  4756. are vectorized) may result in incorrect behavior. If you need to write
  4757. to the tensors, please clone them first.
  4758. Example::
  4759. >>> x = torch.tensor([[1], [2], [3]])
  4760. >>> x.size()
  4761. torch.Size([3, 1])
  4762. >>> x.expand(3, 4)
  4763. tensor([[ 1, 1, 1, 1],
  4764. [ 2, 2, 2, 2],
  4765. [ 3, 3, 3, 3]])
  4766. >>> x.expand(-1, 4) # -1 means not changing the size of that dimension
  4767. tensor([[ 1, 1, 1, 1],
  4768. [ 2, 2, 2, 2],
  4769. [ 3, 3, 3, 3]])
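
The stride-0 mechanism can be observed directly (illustrative)::

    >>> x.expand(3, 4).stride()   # the expanded dimension has stride 0
    (1, 0)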
  4770. """,
  4771. )
  4772. add_docstr_all(
  4773. "expand_as",
  4774. r"""
  4775. expand_as(other) -> Tensor
  4776. Expand this tensor to the same size as :attr:`other`.
  4777. ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
  4778. Please see :meth:`~Tensor.expand` for more information about ``expand``.
  4779. Args:
  4780. other (:class:`torch.Tensor`): The result tensor has the same size
  4781. as :attr:`other`.
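
A minimal illustrative sketch of the equivalence::

    >>> x = torch.tensor([[1], [2], [3]])
    >>> y = torch.zeros(3, 4)
    >>> x.expand_as(y).size()
    torch.Size([3, 4])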
  4782. """,
  4783. )
  4784. add_docstr_all(
  4785. "sum_to_size",
  4786. r"""
  4787. sum_to_size(*size) -> Tensor
  4788. Sum ``this`` tensor to :attr:`size`.
  4789. :attr:`size` must be broadcastable to ``this`` tensor size.
  4790. Args:
  4791. size (int...): a sequence of integers defining the shape of the output tensor.
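
A minimal illustrative sketch (the broadcast dimensions are summed out)::

    >>> x = torch.ones(2, 3)
    >>> x.sum_to_size(1, 3)
    tensor([[2., 2., 2.]])
    >>> x.sum_to_size(2, 1)
    tensor([[3.],
            [3.]])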
  4792. """,
  4793. )
  4794. add_docstr_all(
  4795. "zero_",
  4796. r"""
  4797. zero_() -> Tensor
  4798. Fills :attr:`self` tensor with zeros.
  4799. """,
  4800. )
  4801. add_docstr_all(
  4802. "matmul",
  4803. r"""
  4804. matmul(tensor2) -> Tensor
  4805. See :func:`torch.matmul`
  4806. """,
  4807. )
  4808. add_docstr_all(
  4809. "chunk",
  4810. r"""
  4811. chunk(chunks, dim=0) -> List of Tensors
  4812. See :func:`torch.chunk`
  4813. """,
  4814. )
  4815. add_docstr_all(
  4816. "unsafe_chunk",
  4817. r"""
  4818. unsafe_chunk(chunks, dim=0) -> List of Tensors
  4819. See :func:`torch.unsafe_chunk`
  4820. """,
  4821. )
  4822. add_docstr_all(
  4823. "unsafe_split",
  4824. r"""
  4825. unsafe_split(split_size, dim=0) -> List of Tensors
  4826. See :func:`torch.unsafe_split`
  4827. """,
  4828. )
  4829. add_docstr_all(
  4830. "tensor_split",
  4831. r"""
  4832. tensor_split(indices_or_sections, dim=0) -> List of Tensors
  4833. See :func:`torch.tensor_split`
  4834. """,
  4835. )
  4836. add_docstr_all(
  4837. "hsplit",
  4838. r"""
  4839. hsplit(split_size_or_sections) -> List of Tensors
  4840. See :func:`torch.hsplit`
  4841. """,
  4842. )
  4843. add_docstr_all(
  4844. "vsplit",
  4845. r"""
  4846. vsplit(split_size_or_sections) -> List of Tensors
  4847. See :func:`torch.vsplit`
  4848. """,
  4849. )
  4850. add_docstr_all(
  4851. "dsplit",
  4852. r"""
  4853. dsplit(split_size_or_sections) -> List of Tensors
  4854. See :func:`torch.dsplit`
  4855. """,
  4856. )
add_docstr_all(
    "stft",
    r"""
stft(n_fft, hop_length=None, win_length=None, window=None, center=True,
     pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor

See :func:`torch.stft`
""",
)
add_docstr_all(
    "istft",
    r"""
istft(n_fft, hop_length=None, win_length=None, window=None,
      center=True, normalized=False, onesided=True, length=None) -> Tensor

See :func:`torch.istft`
""",
)

add_docstr_all(
    "det",
    r"""
det() -> Tensor

See :func:`torch.det`
""",
)

add_docstr_all(
    "where",
    r"""
where(condition, y) -> Tensor

``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
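
A minimal illustrative sketch::

    >>> x = torch.tensor([1.0, -2.0, 3.0])
    >>> x.where(x > 0, torch.zeros_like(x))
    tensor([1., 0., 3.])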
  4885. """,
  4886. )
  4887. add_docstr_all(
  4888. "logdet",
  4889. r"""
  4890. logdet() -> Tensor
  4891. See :func:`torch.logdet`
  4892. """,
  4893. )
  4894. add_docstr_all(
  4895. "slogdet",
  4896. r"""
  4897. slogdet() -> (Tensor, Tensor)
  4898. See :func:`torch.slogdet`
  4899. """,
  4900. )
  4901. add_docstr_all(
  4902. "unbind",
  4903. r"""
  4904. unbind(dim=0) -> seq
  4905. See :func:`torch.unbind`
  4906. """,
  4907. )
  4908. add_docstr_all(
  4909. "pin_memory",
  4910. r"""
  4911. pin_memory() -> Tensor
  4912. Copies the tensor to pinned memory, if it's not already pinned.
  4913. """,
  4914. )
  4915. add_docstr_all(
  4916. "pinverse",
  4917. r"""
  4918. pinverse() -> Tensor
  4919. See :func:`torch.pinverse`
  4920. """,
  4921. )
  4922. add_docstr_all(
  4923. "index_add",
  4924. r"""
  4925. index_add(dim, index, source, *, alpha=1) -> Tensor
  4926. Out-of-place version of :meth:`torch.Tensor.index_add_`.
  4927. """,
  4928. )
  4929. add_docstr_all(
  4930. "index_copy",
  4931. r"""
  4932. index_copy(dim, index, tensor2) -> Tensor
  4933. Out-of-place version of :meth:`torch.Tensor.index_copy_`.
  4934. """,
  4935. )
  4936. add_docstr_all(
  4937. "index_fill",
  4938. r"""
  4939. index_fill(dim, index, value) -> Tensor
  4940. Out-of-place version of :meth:`torch.Tensor.index_fill_`.
  4941. """,
  4942. )
  4943. add_docstr_all(
  4944. "scatter",
  4945. r"""
  4946. scatter(dim, index, src) -> Tensor
  4947. Out-of-place version of :meth:`torch.Tensor.scatter_`
  4948. """,
  4949. )
  4950. add_docstr_all(
  4951. "scatter_add",
  4952. r"""
  4953. scatter_add(dim, index, src) -> Tensor
  4954. Out-of-place version of :meth:`torch.Tensor.scatter_add_`
  4955. """,
  4956. )
  4957. add_docstr_all(
  4958. "scatter_reduce",
  4959. r"""
  4960. scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor
  4961. Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
  4962. """,
  4963. )
  4964. add_docstr_all(
  4965. "masked_scatter",
  4966. r"""
  4967. masked_scatter(mask, tensor) -> Tensor
  4968. Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
  4969. .. note::
  4970. The inputs :attr:`self` and :attr:`mask`
  4971. :ref:`broadcast <broadcasting-semantics>`.

Example::

    >>> self = torch.tensor([0, 0, 0, 0, 0])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]])
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])
  4979. """,
  4980. )
  4981. add_docstr_all(
  4982. "xlogy",
  4983. r"""
  4984. xlogy(other) -> Tensor
  4985. See :func:`torch.xlogy`
  4986. """,
  4987. )
  4988. add_docstr_all(
  4989. "xlogy_",
  4990. r"""
  4991. xlogy_(other) -> Tensor
  4992. In-place version of :meth:`~Tensor.xlogy`
  4993. """,
  4994. )
  4995. add_docstr_all(
  4996. "masked_fill",
  4997. r"""
  4998. masked_fill(mask, value) -> Tensor
  4999. Out-of-place version of :meth:`torch.Tensor.masked_fill_`
  5000. """,
  5001. )
  5002. add_docstr_all(
  5003. "grad",
  5004. r"""
  5005. This attribute is ``None`` by default and becomes a Tensor the first time a call to
  5006. :func:`backward` computes gradients for ``self``.
  5007. The attribute will then contain the gradients computed and future calls to
  5008. :func:`backward` will accumulate (add) gradients into it.
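
A minimal illustrative sketch of the accumulation behavior::

    >>> x = torch.tensor([1.0], requires_grad=True)
    >>> (2 * x).sum().backward()
    >>> x.grad
    tensor([2.])
    >>> (2 * x).sum().backward()
    >>> x.grad  # gradients from the second call are added to the first
    tensor([4.])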
  5009. """,
  5010. )
  5011. add_docstr_all(
  5012. "retain_grad",
  5013. r"""
  5014. retain_grad() -> None
  5015. Enables this Tensor to have their :attr:`grad` populated during
  5016. :func:`backward`. This is a no-op for leaf tensors.
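
A minimal illustrative sketch for a non-leaf tensor::

    >>> x = torch.tensor([1.0], requires_grad=True)
    >>> y = x * 2
    >>> y.retain_grad()
    >>> y.sum().backward()
    >>> y.grad
    tensor([1.])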
  5017. """,
  5018. )
  5019. add_docstr_all(
  5020. "retains_grad",
  5021. r"""
  5022. Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
  5023. populated during :func:`backward`, ``False`` otherwise.
  5024. """,
  5025. )
  5026. add_docstr_all(
  5027. "requires_grad",
  5028. r"""
  5029. Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.
  5030. .. note::
  5031. The fact that gradients need to be computed for a Tensor do not mean that the :attr:`grad`
  5032. attribute will be populated, see :attr:`is_leaf` for more details.
  5033. """,
  5034. )
add_docstr_all(
    "is_leaf",
    r"""
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.

For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.

Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.

Example::

    >>> a = torch.rand(10, requires_grad=True)
    >>> a.is_leaf
    True
    >>> b = torch.rand(10, requires_grad=True).cuda()
    >>> b.is_leaf
    False
    # b was created by the operation that cast a cpu Tensor into a cuda Tensor
    >>> c = torch.rand(10, requires_grad=True) + 2
    >>> c.is_leaf
    False
    # c was created by the addition operation
    >>> d = torch.rand(10).cuda()
    >>> d.is_leaf
    True
    # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
    >>> e = torch.rand(10).cuda().requires_grad_()
    >>> e.is_leaf
    True
    # e requires gradients and has no operations creating it
    >>> f = torch.rand(10, requires_grad=True, device="cuda")
    >>> f.is_leaf
    True
    # f requires grad, has no operation creating it
""",
)
add_docstr_all(
    "names",
    r"""
Stores names for each of this tensor's dimensions.

``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.

Dimension names may contain alphanumeric characters or underscores. Furthermore,
a dimension name must be a valid Python identifier that does not start with an
underscore.

Tensors may not have two named dimensions with the same name.
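
A minimal illustrative sketch::

    >>> t = torch.zeros(2, 3, names=('N', 'C'))
    >>> t.names
    ('N', 'C')
    >>> torch.zeros(2, 3).names   # unnamed dimensions
    (None, None)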

.. warning::
    The named tensor API is experimental and subject to change.
""",
)

add_docstr_all(
    "is_cuda",
    r"""
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_cpu",
    r"""
Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_ipu",
    r"""
Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_xpu",
    r"""
Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_quantized",
    r"""
Is ``True`` if the Tensor is quantized, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_meta",
    r"""
Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
are like normal tensors, but they carry no data.
""",
)

add_docstr_all(
    "is_mps",
    r"""
Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_sparse",
    r"""
Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_sparse_csr",
    r"""
Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
""",
)

add_docstr_all(
    "device",
    r"""
Is the :class:`torch.device` where this Tensor is.
""",
)

add_docstr_all(
    "ndim",
    r"""
Alias for :meth:`~Tensor.dim()`
""",
)

add_docstr_all(
    "T",
    r"""
Returns a view of this tensor with its dimensions reversed.

If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.

.. warning::
    The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
    is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT`
    to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
    the dimensions of a tensor.
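
A minimal illustrative sketch on a 2-D tensor::

    >>> x = torch.arange(6).reshape(2, 3)
    >>> x.T
    tensor([[0, 3],
            [1, 4],
            [2, 5]])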
  5162. """,
  5163. )
  5164. add_docstr_all(
  5165. "H",
  5166. r"""
  5167. Returns a view of a matrix (2-D tensor) conjugated and transposed.
  5168. ``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
  5169. ``x.transpose(0, 1)`` for real matrices.
  5170. .. seealso::
  5171. :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
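
A minimal illustrative sketch on a complex matrix::

    >>> x = torch.tensor([[1 + 1j, 2 - 2j]])
    >>> x.H
    tensor([[1.-1.j],
            [2.+2.j]])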
  5172. """,
  5173. )
  5174. add_docstr_all(
  5175. "mT",
  5176. r"""
  5177. Returns a view of this tensor with the last two dimensions transposed.
  5178. ``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
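
A minimal illustrative sketch on a batch of matrices::

    >>> x = torch.randn(2, 3, 4)
    >>> x.mT.shape
    torch.Size([2, 4, 3])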
  5179. """,
  5180. )
  5181. add_docstr_all(
  5182. "mH",
  5183. r"""
  5184. Accessing this property is equivalent to calling :func:`adjoint`.
  5185. """,
  5186. )
  5187. add_docstr_all(
  5188. "adjoint",
  5189. r"""
  5190. adjoint() -> Tensor
  5191. Alias for :func:`adjoint`
  5192. """,
  5193. )
  5194. add_docstr_all(
  5195. "real",
  5196. r"""
  5197. Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
  5198. The returned tensor and :attr:`self` share the same underlying storage.
  5199. Returns :attr:`self` if :attr:`self` is a real-valued tensor tensor.

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.real
    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""",
)

add_docstr_all(
    "imag",
    r"""
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

.. warning::
    :func:`imag` is only supported for tensors with complex dtypes.

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.imag
    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""",
)

add_docstr_all(
    "as_subclass",
    r"""
as_subclass(cls) -> Tensor

Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
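
A minimal illustrative sketch (``MyTensor`` is a hypothetical subclass)::

    >>> class MyTensor(torch.Tensor):
    ...     pass
    >>> t = torch.tensor([1.0, 2.0])
    >>> t.as_subclass(MyTensor).__class__.__name__
    'MyTensor'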
  5230. """,
  5231. )
  5232. add_docstr_all(
  5233. "crow_indices",
  5234. r"""
  5235. crow_indices() -> IntTensor
  5236. Returns the tensor containing the compressed row indices of the :attr:`self`
  5237. tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
  5238. The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
  5239. and of type ``int32`` or ``int64``. When using MKL routines such as sparse
  5240. matrix multiplication, it is necessary to use ``int32`` indexing in order
  5241. to avoid downcasting and potentially losing information.
  5242. Example::
  5243. >>> csr = torch.eye(5,5).to_sparse_csr()
  5244. >>> csr.crow_indices()
  5245. tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
  5246. """,
  5247. )
  5248. add_docstr_all(
  5249. "col_indices",
  5250. r"""
  5251. col_indices() -> IntTensor
  5252. Returns the tensor containing the column indices of the :attr:`self`
  5253. tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
  5254. The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
  5255. and of type ``int32`` or ``int64``. When using MKL routines such as sparse
  5256. matrix multiplication, it is necessary to use ``int32`` indexing in order
  5257. to avoid downcasting and potentially losing information.
  5258. Example::
  5259. >>> csr = torch.eye(5,5).to_sparse_csr()
  5260. >>> csr.col_indices()
  5261. tensor([0, 1, 2, 3, 4], dtype=torch.int32)
  5262. """,
  5263. )
  5264. add_docstr_all(
  5265. "to_padded_tensor",
  5266. r"""
  5267. to_padded_tensor(padding, output_size=None) -> Tensor
  5268. See :func:`to_padded_tensor`
  5269. """,
  5270. )