# -*- coding: utf-8 -*-
"""Adds docstrings to functions defined in the torch._C module."""
import re

import torch._C
from torch._C import _add_docstr as add_docstr


def parse_kwargs(desc):
    """Maps a description of args to a dictionary of {argname: description}.

    Input:
        ('    weight (Tensor): a weight tensor\n' +
         '        Some optional description')
    Output: {
        'weight': \
        'weight (Tensor): a weight tensor\n        Some optional description'
    }
    """
    # Split on a newline followed by exactly 4 spaces (i.e. the start of a new
    # argument entry; continuation lines are indented further).
    regx = re.compile(r"\n\s{4}(?!\s)")
    kwargs = [section.strip() for section in regx.split(desc)]
    kwargs = [section for section in kwargs if len(section) > 0]
    return {desc.split(" ")[0]: desc for desc in kwargs}


def merge_dicts(*dicts):
    return {x: d[x] for d in dicts for x in d}
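
# A quick doctest-style sketch of the two helpers above (the argument string
# is made up; the outputs are what these exact definitions produce, with
# later dicts overriding earlier ones in merge_dicts):
#
#     >>> parse_kwargs("\n    weight (Tensor): a weight tensor\n        extra line\n")
#     {'weight': 'weight (Tensor): a weight tensor\n        extra line'}
#     >>> merge_dicts({"a": 1}, {"a": 2, "b": 3})
#     {'a': 2, 'b': 3}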
common_args = parse_kwargs(
    """
    input (Tensor): the input tensor.
    generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
    out (Tensor, optional): the output tensor.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned tensor. Default: ``torch.preserve_format``.
"""
)
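
# The parsed entries are spliced into the docstrings below via ``str.format``;
# a minimal sketch (the template string here is illustrative, not from a
# docstring in this file):
#
#     >>> common_args["out"]
#     'out (Tensor, optional): the output tensor.'
#     >>> "Keyword args:\n    {out}".format(**common_args)
#     'Keyword args:\n    out (Tensor, optional): the output tensor.'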
reduceops_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.
    keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""
    ),
)

multi_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int or tuple of ints): the dimension or dimensions to reduce.
"""
    ),
    {
        "keepdim_details": """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""
    },
    {
        "opt_dim": """
    dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
        If ``None``, all dimensions are reduced.
"""
    },
)

single_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int): the dimension to reduce.
"""
    ),
    {
        "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""
    },
)
factory_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
    check_invariants (bool, optional): If sparse tensor invariants are checked.
        Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
        initially False.
"""
    ),
    {
        "sparse_factory_device_note": """\
.. note::

   If the ``device`` argument is not specified the device of the given
   :attr:`values` and indices tensor(s) must match. If, however, the
   argument is specified the input Tensors will be converted to the
   given device and in turn determine the device of the constructed
   sparse tensor."""
    },
)
factory_like_common_args = parse_kwargs(
    """
    input (Tensor): the size of :attr:`input` will determine size of the output tensor.
    layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
        Default: if ``None``, defaults to the layout of :attr:`input`.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
        Default: if ``None``, defaults to the dtype of :attr:`input`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, defaults to the device of :attr:`input`.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

factory_data_common_args = parse_kwargs(
    """
    data (array_like): Initial data for the tensor. Can be a list, tuple,
        NumPy ``ndarray``, scalar, and other types.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, infers data type from :attr:`data`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
"""
)
tf32_notes = {
    "tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
}

rocm_fp16_notes = {
    "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \
:ref:`different precision<fp16_on_mi200>` for backward."""
}

reproducibility_notes = {
    "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
undesirable, you can try to make the operation deterministic (potentially at \
a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
See :doc:`/notes/randomness` for more information.""",
}

sparse_support_notes = {
    "sparse_beta_warning": """
.. warning::
    Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
    or may not have autograd support. If you notice missing functionality please
    open a feature request.""",
}
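
# Everything below attaches reStructuredText documentation to C-implemented
# callables. A sketch of the observable effect once this module has run:
#
#     >>> import torch
#     >>> "absolute value" in torch.abs.__doc__
#     True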
add_docstr(
    torch.abs,
    r"""
abs(input, *, out=None) -> Tensor

Computes the absolute value of each element in :attr:`input`.

.. math::
    \text{out}_{i} = |\text{input}_{i}|
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.abs(torch.tensor([-1, -2, 3]))
    tensor([ 1, 2, 3])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.absolute,
    r"""
absolute(input, *, out=None) -> Tensor

Alias for :func:`torch.abs`
""",
)
add_docstr(
    torch.acos,
    r"""
acos(input, *, out=None) -> Tensor

Computes the inverse cosine of each element in :attr:`input`.

.. math::
    \text{out}_{i} = \cos^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
    >>> torch.acos(a)
    tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arccos,
    r"""
arccos(input, *, out=None) -> Tensor

Alias for :func:`torch.acos`.
""",
)
add_docstr(
    torch.acosh,
    r"""
acosh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \cosh^{-1}(\text{input}_{i})

Note:
    The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
    will be mapped to ``NaN``, except for `+INF` for which the output is mapped to `+INF`.
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(1, 2)
    >>> a
    tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
    >>> torch.acosh(a)
    tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arccosh,
    r"""
arccosh(input, *, out=None) -> Tensor

Alias for :func:`torch.acosh`.
""",
)
add_docstr(
    torch.index_add,
    r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor

See :meth:`~Tensor.index_add_` for function description.
""",
)
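
# The docstring above defers to Tensor.index_add_; a minimal sketch of the
# in-place variant (values worked out by hand: row ``index[i]`` of ``x``
# accumulates row ``i`` of ``t``):
#
#     >>> x = torch.ones(5, 3)
#     >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
#     >>> index = torch.tensor([0, 4, 2])
#     >>> x.index_add_(0, index, t)
#     tensor([[ 2.,  3.,  4.],
#             [ 1.,  1.,  1.],
#             [ 8.,  9., 10.],
#             [ 1.,  1.,  1.],
#             [ 5.,  6.,  7.]])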
add_docstr(
    torch.index_copy,
    r"""
index_copy(input, dim, index, source, *, out=None) -> Tensor

See :meth:`~Tensor.index_copy_` for function description.
""",
)
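
# Likewise for Tensor.index_copy_: rows of ``t`` are copied (not accumulated)
# into the rows of ``x`` selected by ``index``:
#
#     >>> x = torch.zeros(5, 3)
#     >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
#     >>> index = torch.tensor([0, 4, 2])
#     >>> x.index_copy_(0, index, t)
#     tensor([[1., 2., 3.],
#             [0., 0., 0.],
#             [7., 8., 9.],
#             [0., 0., 0.],
#             [4., 5., 6.]])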
add_docstr(
    torch.index_reduce,
    r"""
index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor

See :meth:`~Tensor.index_reduce_` for function description.
""",
)
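
# And for Tensor.index_reduce_ with ``reduce='prod'`` (``include_self``
# defaults to True, so the original entries participate in the product):
#
#     >>> x = torch.full((5, 3), 2.0)
#     >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
#     >>> index = torch.tensor([0, 4, 2])
#     >>> x.index_reduce_(0, index, t, 'prod')
#     tensor([[ 2.,  4.,  6.],
#             [ 2.,  2.,  2.],
#             [14., 16., 18.],
#             [ 2.,  2.,  2.],
#             [ 8., 10., 12.]])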
add_docstr(
    torch.add,
    r"""
add(input, other, *, alpha=1, out=None) -> Tensor

Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.

.. math::
    \text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
"""
    + r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

Args:
    {input}
    other (Tensor or Number): the tensor or number to add to :attr:`input`.

Keyword arguments:
    alpha (Number): the multiplier for :attr:`other`.
    {out}

Examples::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
    >>> torch.add(a, 20)
    tensor([ 20.0202, 21.0985, 21.3506, 19.3944])

    >>> b = torch.randn(4)
    >>> b
    tensor([-0.9732, -0.3497, 0.6245, 0.4022])
    >>> c = torch.randn(4, 1)
    >>> c
    tensor([[ 0.3743],
            [-1.7724],
            [-0.5811],
            [-0.8017]])
    >>> torch.add(b, c, alpha=10)
    tensor([[  2.7695,   3.3930,   4.3672,   4.1450],
            [-18.6971, -18.0736, -17.0994, -17.3216],
            [ -6.7845,  -6.1610,  -5.1868,  -5.4090],
            [ -8.9902,  -8.3667,  -7.3925,  -7.6147]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addbmm,
    r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.

:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.

If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.

.. math::
    out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): matrix to be added
    batch1 (Tensor): the first batch of matrices to be multiplied
    batch2 (Tensor): the second batch of matrices to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.addbmm(M, batch1, batch2)
    tensor([[  6.6311,   0.0503,   6.9768, -12.0362,  -2.1653],
            [ -4.8185,  -1.4255,  -6.6760,   8.9453,   2.5743],
            [ -3.8202,   4.3691,   1.0943,  -1.1109,   5.4730]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
add_docstr(
    torch.addcdiv,
    r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.

.. warning::
    Integer division with addcdiv is no longer supported, and in a future
    release addcdiv will perform a true division of tensor1 and tensor2.
    The historic addcdiv behavior can be implemented as
    (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
    for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
    The future addcdiv behavior is just the latter implementation:
    (input + value * tensor1 / tensor2), for all dtypes.

.. math::
    \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
"""
    + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the numerator tensor
    tensor2 (Tensor): the denominator tensor

Keyword args:
    value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcdiv(t, t1, t2, value=0.1)
    tensor([[-0.2312, -3.6496,  0.1312],
            [-1.0428,  3.4292, -0.1030],
            [-0.5369, -0.9829,  0.0430]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addcmul,
    r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.

.. math::
    \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
"""
    + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the tensor to be multiplied
    tensor2 (Tensor): the tensor to be multiplied

Keyword args:
    value (Number, optional): multiplier for :math:`\text{{tensor1}} \times \text{{tensor2}}`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcmul(t, t1, t2, value=0.1)
    tensor([[-0.8635, -0.6391,  1.6174],
            [-0.7617, -0.5879,  1.7388],
            [-0.8353, -0.6249,  1.6511]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addmm,
    r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.

If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.

:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product
between :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): matrix to be added
    mat1 (Tensor): the first matrix to be matrix multiplied
    mat2 (Tensor): the second matrix to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(2, 3)
    >>> mat1 = torch.randn(2, 3)
    >>> mat2 = torch.randn(3, 3)
    >>> torch.addmm(M, mat1, mat2)
    tensor([[-4.8716,  1.4671, -1.3746],
            [ 0.7573, -3.9555, -2.8681]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)
add_docstr(
    torch.adjoint,
    r"""
adjoint(Tensor) -> Tensor

Returns a view of the tensor conjugated and with the last two dimensions transposed.

``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
to ``x.transpose(-2, -1)`` for real tensors.

Example::

    >>> x = torch.arange(4, dtype=torch.float)
    >>> A = torch.complex(x, x).reshape(2, 2)
    >>> A
    tensor([[0.+0.j, 1.+1.j],
            [2.+2.j, 3.+3.j]])
    >>> A.adjoint()
    tensor([[0.-0.j, 2.-2.j],
            [1.-1.j, 3.-3.j]])
    >>> (A.adjoint() == A.mH).all()
    tensor(True)
""",
)
add_docstr(
    torch.sspaddmm,
    r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.

Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.

Args:
    input (Tensor): a sparse matrix to be added
    mat1 (Tensor): a sparse matrix to be matrix multiplied
    mat2 (Tensor): a dense matrix to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}
""".format(
        **common_args
    ),
)
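
# The docstring above has no example; a shape-level sketch (the COO tensors
# here are illustrative only, with ``inp`` sparse (2, 3) and ``mat1``
# sparse (2, 2)):
#
#     >>> mat1 = torch.sparse_coo_tensor([[0, 1], [1, 0]], [1., 2.], (2, 2))
#     >>> inp = torch.sparse_coo_tensor([[0], [2]], [3.], (2, 3))
#     >>> torch.sspaddmm(inp, mat1, torch.randn(2, 3)).shape
#     torch.Size([2, 3])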
add_docstr(
    torch.smm,
    r"""
smm(input, mat) -> Tensor

Performs a matrix multiplication of the sparse matrix :attr:`input`
with the dense matrix :attr:`mat`.

Args:
    input (Tensor): a sparse matrix to be matrix multiplied
    mat (Tensor): a dense matrix to be matrix multiplied
""",
)
  537. )
  538. add_docstr(
  539. torch.addmv,
  540. r"""
  541. addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
  542. Performs a matrix-vector product of the matrix :attr:`mat` and
  543. the vector :attr:`vec`.
  544. The vector :attr:`input` is added to the final result.
  545. If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
  546. size `m`, then :attr:`input` must be
  547. :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
  548. :attr:`out` will be 1-D tensor of size `n`.
  549. :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
  550. :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
  551. .. math::
  552. \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
  553. If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
  554. it will not be propagated.
  555. """
  556. + r"""
  557. For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
  558. :attr:`alpha` must be real numbers, otherwise they should be integers.
  559. Args:
  560. input (Tensor): vector to be added
  561. mat (Tensor): matrix to be matrix multiplied
  562. vec (Tensor): vector to be matrix multiplied
  563. Keyword args:
  564. beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
  565. alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
  566. {out}
  567. Example::
  568. >>> M = torch.randn(2)
  569. >>> mat = torch.randn(2, 3)
  570. >>> vec = torch.randn(3)
  571. >>> torch.addmv(M, mat, vec)
  572. tensor([-0.3768, -5.5565])
  573. """.format(
  574. **common_args
  575. ),
  576. )
add_docstr(
    torch.addr,
    r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.

Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.

Args:
    input (Tensor): matrix to be added
    vec1 (Tensor): the first vector of the outer product
    vec2 (Tensor): the second vector of the outer product

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
    {out}

Example::

    >>> vec1 = torch.arange(1., 4.)
    >>> vec2 = torch.arange(1., 3.)
    >>> M = torch.zeros(3, 2)
    >>> torch.addr(M, vec1, vec2)
    tensor([[ 1., 2.],
            [ 2., 4.],
            [ 3., 6.]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.allclose,
    r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

This function checks if :attr:`input` and :attr:`other` satisfy the condition:

.. math::
    \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
"""
    + r"""
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_

Args:
    input (Tensor): first tensor to compare
    other (Tensor): second tensor to compare
    atol (float, optional): absolute tolerance. Default: 1e-08
    rtol (float, optional): relative tolerance. Default: 1e-05
    equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``

Example::

    >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
    False
    >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
    True
    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
    False
    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
    True
""",
)
add_docstr(
    torch.all,
    r"""
all(input) -> Tensor

Tests if all elements in :attr:`input` evaluate to `True`.

.. note:: This function matches the behaviour of NumPy in returning
          output of dtype `bool` for all supported dtypes except `uint8`.
          For `uint8` the dtype of output is `uint8` itself.

Example::

    >>> a = torch.rand(1, 2).bool()
    >>> a
    tensor([[False, True]], dtype=torch.bool)
    >>> torch.all(a)
    tensor(False, dtype=torch.bool)
    >>> a = torch.arange(0, 3)
    >>> a
    tensor([0, 1, 2])
    >>> torch.all(a)
    tensor(False)

.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
   :noindex:

For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.rand(4, 2).bool()
    >>> a
    tensor([[True, True],
            [True, False],
            [True, True],
            [True, True]], dtype=torch.bool)
    >>> torch.all(a, dim=1)
    tensor([ True, False, True, True], dtype=torch.bool)
    >>> torch.all(a, dim=0)
    tensor([ True, False], dtype=torch.bool)
""".format(
        **single_dim_common
    ),
)
add_docstr(
    torch.any,
    r"""
any(input) -> Tensor

Tests if any element in :attr:`input` evaluates to `True`.

.. note:: This function matches the behaviour of NumPy in returning
          output of dtype `bool` for all supported dtypes except `uint8`.
          For `uint8` the dtype of output is `uint8` itself.

Example::

    >>> a = torch.rand(1, 2).bool()
    >>> a
    tensor([[False, True]], dtype=torch.bool)
    >>> torch.any(a)
    tensor(True, dtype=torch.bool)
    >>> a = torch.arange(0, 3)
    >>> a
    tensor([0, 1, 2])
    >>> torch.any(a)
    tensor(True)

.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
   :noindex:

For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 2) < 0
    >>> a
    tensor([[ True, True],
            [False, True],
            [ True, True],
            [False, False]])
    >>> torch.any(a, 1)
    tensor([ True, True, True, False])
    >>> torch.any(a, 0)
    tensor([True, True])
""".format(
        **single_dim_common
    ),
)
add_docstr(
    torch.angle,
    r"""
angle(input, *, out=None) -> Tensor

Computes the element-wise angle (in radians) of the given :attr:`input` tensor.

.. math::
    \text{out}_{i} = angle(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
          zero for non-negative real numbers, and propagates NaNs. Previously
          the function would return zero for all real numbers and not propagate
          floating-point NaNs.

Example::

    >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
    tensor([ 135., 135., -45.])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.as_strided,
    r"""
as_strided(input, size, stride, storage_offset=None) -> Tensor

Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.

.. warning::
    Prefer using other view functions, like :meth:`torch.Tensor.expand`,
    to setting a view's strides manually with `as_strided`, as this
    function's behavior depends on the implementation of a tensor's storage.
    The constructed view of the storage must only refer to elements within
    the storage or a runtime error will be thrown, and if the view is
    "overlapped" (with multiple indices referring to the same element in
    memory) its behavior is undefined.

Args:
    {input}
    size (tuple of ints): the shape of the output tensor
    stride (tuple of ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor.
        If ``None``, the storage_offset of the output tensor will match the input tensor.

Example::

    >>> x = torch.randn(3, 3)
    >>> x
    tensor([[ 0.9039,  0.6291,  1.0795],
            [ 0.1586,  2.1939, -0.4900],
            [-0.1909, -0.7503,  1.9355]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2))
    >>> t
    tensor([[0.9039, 1.0795],
            [0.6291, 0.1586]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
    >>> t
    tensor([[0.6291, 0.1586],
            [1.0795, 2.1939]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.as_tensor,
    r"""
as_tensor(data, dtype=None, device=None) -> Tensor

Converts :attr:`data` into a tensor, sharing data and preserving autograd
history if possible.

If :attr:`data` is already a tensor with the requested dtype and device
then :attr:`data` itself is returned, but if :attr:`data` is a
tensor with a different dtype or device then it's copied as if using
`data.to(dtype=dtype, device=device)`.

If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
tensor is constructed using :func:`torch.from_numpy`.

.. seealso::

    :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).

Args:
    {data}
    {dtype}
    device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
        then the device of data is used. If None and data is not a tensor then
        the result tensor is constructed on the CPU.

Example::

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.as_tensor(a)
    >>> t
    tensor([ 1, 2, 3])
    >>> t[0] = -1
    >>> a
    array([-1, 2, 3])

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.as_tensor(a, device=torch.device('cuda'))
    >>> t
    tensor([ 1, 2, 3])
    >>> t[0] = -1
    >>> a
    array([1, 2, 3])
""".format(
        **factory_data_common_args
    ),
)
add_docstr(
    torch.asin,
    r"""
asin(input, *, out=None) -> Tensor

Returns a new tensor with the arcsine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sin^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.5962, 1.4985, -0.4396, 1.4525])
    >>> torch.asin(a)
    tensor([-0.6387, nan, -0.4552, nan])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arcsin,
    r"""
arcsin(input, *, out=None) -> Tensor

Alias for :func:`torch.asin`.
""",
)
add_docstr(
    torch.asinh,
    r"""
asinh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sinh^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
    >>> torch.asinh(a)
    tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arcsinh,
    r"""
arcsinh(input, *, out=None) -> Tensor

Alias for :func:`torch.asinh`.
""",
)
add_docstr(
    torch.atan,
    r"""
atan(input, *, out=None) -> Tensor

Returns a new tensor with the arctangent of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \tan^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
    >>> torch.atan(a)
    tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arctan,
    r"""
arctan(input, *, out=None) -> Tensor

Alias for :func:`torch.atan`.
""",
)
add_docstr(
    torch.atan2,
    r"""
atan2(input, other, *, out=None) -> Tensor

Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
parameter, is the y-coordinate.)

The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    input (Tensor): the first input tensor
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
    >>> torch.atan2(a, torch.randn(4))
    tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arctan2,
    r"""
arctan2(input, other, *, out=None) -> Tensor

Alias for :func:`torch.atan2`.
""",
)
add_docstr(
    torch.atanh,
    r"""
atanh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.

Note:
    The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
    will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
    mapped to `+/-INF` respectively.

.. math::
    \text{out}_{i} = \tanh^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(-1, 1)
    >>> a
    tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
    >>> torch.atanh(a)
    tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arctanh,
    r"""
arctanh(input, *, out=None) -> Tensor

Alias for :func:`torch.atanh`.
""",
)
add_docstr(
    torch.asarray,
    r"""
asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor

Converts :attr:`obj` to a tensor.

:attr:`obj` can be one of:

1. a tensor
2. a NumPy array or a NumPy scalar
3. a DLPack capsule
4. an object that implements Python's buffer protocol
5. a scalar
6. a sequence of scalars

When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
same device, and share memory with it. These properties can be controlled with the
:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
If the returned tensor is of a different datatype, on a different device, or a copy is
requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
also a tensor with an autograd history then the returned tensor will have the same history.

When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
buffer protocol then the buffer is interpreted as an array of bytes grouped according to
the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
passed, the default floating point datatype is used instead.) The returned tensor
will have the specified datatype (or default floating point datatype if none is specified)
and, by default, be on the CPU device and share memory with the buffer.

When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
the CPU that does not share its memory (i.e. ``copy=True``). By default, its datatype will
be the PyTorch datatype corresponding to the NumPy scalar's datatype.

When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
returned tensor will, by default, infer its datatype from the scalar values, be on the
CPU device, and not share its memory.

.. seealso::

    :func:`torch.tensor` creates a tensor that always copies the data from the input object.

    :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.

    :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
    implement the buffer protocol.

    :func:`torch.from_dlpack` creates a tensor that always shares memory from
    DLPack capsules.

Args:
    obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
        buffer protocol, scalar, or sequence of scalars.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
        Default: ``None``, which causes the datatype of the returned tensor to be
        inferred from :attr:`obj`.
    copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
        Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
        whenever possible. If ``True`` then the returned tensor does not share its memory.
        If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
        error is thrown if it cannot.
    device (:class:`torch.device`, optional): the device of the returned tensor.
        Default: ``None``, which causes the device of :attr:`obj` to be used.
    requires_grad (bool, optional): whether the returned tensor requires grad.
        Default: ``False``, which causes the returned tensor not to require a gradient.
        If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
        is also a tensor with an autograd history then the returned tensor will have
        the same history.

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> # Shares memory with tensor 'a'
    >>> b = torch.asarray(a)
    >>> a.data_ptr() == b.data_ptr()
    True
    >>> # Forces memory copy
    >>> c = torch.asarray(a, copy=True)
    >>> a.data_ptr() == c.data_ptr()
    False

    >>> a = torch.tensor([1, 2, 3], requires_grad=True).float()
    >>> b = a + 2
    >>> b
    tensor([1., 2., 3.], grad_fn=<AddBackward0>)
    >>> # Shares memory with tensor 'b', with no grad
    >>> c = torch.asarray(b)
    >>> c
    tensor([1., 2., 3.])
    >>> # Shares memory with tensor 'b', retaining autograd history
    >>> d = torch.asarray(b, requires_grad=True)
    >>> d
    tensor([1., 2., 3.], grad_fn=<AddBackward0>)

    >>> array = numpy.array([1, 2, 3])
    >>> # Shares memory with array 'array'
    >>> t1 = torch.asarray(array)
    >>> array.__array_interface__['data'][0] == t1.data_ptr()
    True
    >>> # Copies memory due to dtype mismatch
    >>> t2 = torch.asarray(array, dtype=torch.float32)
    >>> array.__array_interface__['data'][0] == t2.data_ptr()
    False

    >>> scalar = numpy.float64(0.5)
    >>> torch.asarray(scalar)
    tensor(0.5000, dtype=torch.float64)
""",
)
add_docstr(
    torch.baddbmm,
    r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.

:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.

If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.

.. math::
    \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): the tensor to be added
    batch1 (Tensor): the first batch of matrices to be multiplied
    batch2 (Tensor): the second batch of matrices to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(10, 3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.baddbmm(M, batch1, batch2).size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
add_docstr(
    torch.bernoulli,
    r"""
bernoulli(input, *, generator=None, out=None) -> Tensor

Draws binary random numbers (0 or 1) from a Bernoulli distribution.

The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.

The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.

.. math::
    \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
"""
    + r"""
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.

:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.

Args:
    input (Tensor): the input tensor of probability values for the Bernoulli distribution

Keyword args:
    {generator}
    {out}

Example::

    >>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
    >>> a
    tensor([[ 0.1737, 0.0950, 0.3609],
            [ 0.7148, 0.0289, 0.2676],
            [ 0.9456, 0.8937, 0.7202]])
    >>> torch.bernoulli(a)
    tensor([[ 1., 0., 0.],
            [ 0., 0., 0.],
            [ 1., 1., 1.]])

    >>> a = torch.ones(3, 3)  # probability of drawing "1" is 1
    >>> torch.bernoulli(a)
    tensor([[ 1., 1., 1.],
            [ 1., 1., 1.],
            [ 1., 1., 1.]])
    >>> a = torch.zeros(3, 3)  # probability of drawing "1" is 0
    >>> torch.bernoulli(a)
    tensor([[ 0., 0., 0.],
            [ 0., 0., 0.],
            [ 0., 0., 0.]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bincount,
    r"""
bincount(input, weights=None, minlength=0) -> Tensor

Count the frequency of each value in an array of non-negative ints.

The number of bins (each of width 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is a tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.

Note:
    {backward_reproducibility_note}

Arguments:
    input (Tensor): 1-d int tensor
    weights (Tensor): optional, weight for each value in the input tensor.
        Should be of same size as input tensor.
    minlength (int): optional, minimum number of bins. Should be non-negative.

Returns:
    output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
    :attr:`input` is non-empty, else ``Size(0)``

Example::

    >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
    >>> weights = torch.linspace(0, 1, steps=5)
    >>> input, weights
    (tensor([4, 3, 6, 3, 4]),
     tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))

    >>> torch.bincount(input)
    tensor([0, 0, 0, 2, 2, 0, 1])

    >>> input.bincount(weights)
    tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""".format(
        **reproducibility_notes
    ),
)
add_docstr(
    torch.bitwise_not,
    r"""
bitwise_not(input, *, out=None) -> Tensor

Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
    tensor([ 0, 1, -4], dtype=torch.int8)
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bmm,
    r"""
bmm(input, mat2, *, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.

:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.

If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.

.. math::
    \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
"""
    + r"""
{tf32_note}

{rocm_fp16_note}

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
          For broadcasting matrix products, see :func:`torch.matmul`.

Args:
    input (Tensor): the first batch of matrices to be multiplied
    mat2 (Tensor): the second batch of matrices to be multiplied

Keyword Args:
    {out}

Example::

    >>> input = torch.randn(10, 3, 4)
    >>> mat2 = torch.randn(10, 4, 5)
    >>> res = torch.bmm(input, mat2)
    >>> res.size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
add_docstr(
    torch.bitwise_and,
    r"""
bitwise_and(input, other, *, out=None) -> Tensor

Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([1, 0, 3], dtype=torch.int8)
    >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ False, True, False])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bitwise_or,
    r"""
bitwise_or(input, other, *, out=None) -> Tensor

Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-1, -2, 3], dtype=torch.int8)
    >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ True, True, False])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bitwise_xor,
    r"""
bitwise_xor(input, other, *, out=None) -> Tensor

Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-2, -2, 0], dtype=torch.int8)
    >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ True, False, False])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bitwise_left_shift,
    r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor

Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.

The operation applied is:

.. math::
    \text{{out}}_i = \text{{input}}_i << \text{{other}}_i

Args:
    input (Tensor or Scalar): the first input tensor
    other (Tensor or Scalar): the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-2, -2, 24], dtype=torch.int8)
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bitwise_right_shift,
    r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor

Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.

The operation applied is:

.. math::
    \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i

Args:
    input (Tensor or Scalar): the first input tensor
    other (Tensor or Scalar): the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-1, -7,  3], dtype=torch.int8)
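    >>> # as the Args note, ``other`` may also be a plain scalar (a hand-checkable sketch)
    >>> torch.bitwise_right_shift(torch.tensor([4, 8, 12]), 2)
    tensor([1, 2, 3])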
  1368. """.format(
  1369. **common_args
  1370. ),
  1371. )
add_docstr(
    torch.broadcast_to,
    r"""
broadcast_to(input, shape) -> Tensor

Broadcasts :attr:`input` to the shape :attr:`shape`.
Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.

Args:
    {input}
    shape (list, tuple, or :class:`torch.Size`): the new shape.

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> torch.broadcast_to(x, (3, 3))
    tensor([[1, 2, 3],
            [1, 2, 3],
            [1, 2, 3]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.stack,
    r"""
stack(tensors, dim=0, *, out=None) -> Tensor

Concatenates a sequence of tensors along a new dimension.

All tensors need to be of the same size.

Arguments:
    tensors (sequence of Tensors): sequence of tensors to concatenate
    dim (int): dimension to insert. Has to be between 0 and the number
        of dimensions of concatenated tensors (inclusive)

Keyword args:
    {out}
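
For instance, stacking two 1-D tensors of length 3 along ``dim=0`` yields a
``(2, 3)`` tensor, while ``dim=1`` yields ``(3, 2)`` (a minimal sketch with
hand-checkable values)::

    >>> x = torch.tensor([1, 2, 3])
    >>> y = torch.tensor([4, 5, 6])
    >>> torch.stack((x, y))
    tensor([[1, 2, 3],
            [4, 5, 6]])
    >>> torch.stack((x, y), dim=1)
    tensor([[1, 4],
            [2, 5],
            [3, 6]])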
  1403. """.format(
  1404. **common_args
  1405. ),
  1406. )
add_docstr(
    torch.hstack,
    r"""
hstack(tensors, *, out=None) -> Tensor

Stack tensors in sequence horizontally (column wise).

This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.hstack((a, b))
    tensor([1, 2, 3, 4, 5, 6])
    >>> a = torch.tensor([[1], [2], [3]])
    >>> b = torch.tensor([[4], [5], [6]])
    >>> torch.hstack((a, b))
    tensor([[1, 4],
            [2, 5],
            [3, 6]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.vstack,
    r"""
vstack(tensors, *, out=None) -> Tensor

Stack tensors in sequence vertically (row wise).

This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.vstack((a, b))
    tensor([[1, 2, 3],
            [4, 5, 6]])
    >>> a = torch.tensor([[1], [2], [3]])
    >>> b = torch.tensor([[4], [5], [6]])
    >>> torch.vstack((a, b))
    tensor([[1],
            [2],
            [3],
            [4],
            [5],
            [6]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.dstack,
    r"""
dstack(tensors, *, out=None) -> Tensor

Stack tensors in sequence depthwise (along third axis).

This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.dstack((a, b))
    tensor([[[1, 4],
             [2, 5],
             [3, 6]]])
    >>> a = torch.tensor([[1], [2], [3]])
    >>> b = torch.tensor([[4], [5], [6]])
    >>> torch.dstack((a, b))
    tensor([[[1, 4]],

            [[2, 5]],

            [[3, 6]]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.tensor_split,
    r"""
tensor_split(input, indices_or_sections, dim=0) -> List of Tensors

Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
along dimension :attr:`dim` according to the indices or number of sections specified
by :attr:`indices_or_sections`. This function is based on NumPy's
:func:`numpy.array_split`.

Args:
    input (Tensor): the tensor to split
    indices_or_sections (Tensor, int or list or tuple of ints):
        If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
        with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
        If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
        section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
        is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
        sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
        have size :code:`int(input.size(dim) / n)`.

        If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
        tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
        in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
        would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.

        If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
        long tensor on the CPU.

    dim (int, optional): dimension along which to split the tensor. Default: ``0``

Example::

    >>> x = torch.arange(8)
    >>> torch.tensor_split(x, 3)
    (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))

    >>> x = torch.arange(7)
    >>> torch.tensor_split(x, 3)
    (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
    >>> torch.tensor_split(x, (1, 6))
    (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))

    >>> x = torch.arange(14).reshape(2, 7)
    >>> x
    tensor([[ 0,  1,  2,  3,  4,  5,  6],
            [ 7,  8,  9, 10, 11, 12, 13]])
    >>> torch.tensor_split(x, 3, dim=1)
    (tensor([[0, 1, 2],
            [7, 8, 9]]),
     tensor([[ 3,  4],
            [10, 11]]),
     tensor([[ 5,  6],
            [12, 13]]))
    >>> torch.tensor_split(x, (1, 6), dim=1)
    (tensor([[0],
            [7]]),
     tensor([[ 1,  2,  3,  4,  5],
            [ 8,  9, 10, 11, 12]]),
     tensor([[ 6],
            [13]]))
""",
)
add_docstr(
    torch.chunk,
    r"""
chunk(input, chunks, dim=0) -> List of Tensors

Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
the input tensor.

.. note::

    This function may return fewer than the specified number of chunks!

.. seealso::

    :func:`torch.tensor_split`, a function that always returns exactly the
    specified number of chunks.

If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
all returned chunks will be the same size.
If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
all returned chunks will be the same size, except the last one.
If such division is not possible, this function may return fewer
than the specified number of chunks.

Arguments:
    input (Tensor): the tensor to split
    chunks (int): number of chunks to return
    dim (int): dimension along which to split the tensor

Example::

    >>> torch.arange(11).chunk(6)
    (tensor([0, 1]),
     tensor([2, 3]),
     tensor([4, 5]),
     tensor([6, 7]),
     tensor([8, 9]),
     tensor([10]))
    >>> torch.arange(12).chunk(6)
    (tensor([0, 1]),
     tensor([2, 3]),
     tensor([4, 5]),
     tensor([6, 7]),
     tensor([8, 9]),
     tensor([10, 11]))
    >>> torch.arange(13).chunk(6)
    (tensor([0, 1, 2]),
     tensor([3, 4, 5]),
     tensor([6, 7, 8]),
     tensor([ 9, 10, 11]),
     tensor([12]))
""",
)
add_docstr(
    torch.unsafe_chunk,
    r"""
unsafe_chunk(input, chunks, dim=0) -> List of Tensors

Works like :func:`torch.chunk` but without enforcing the autograd restrictions
on inplace modification of the outputs.

.. warning::

    This function is safe to use as long as only the input, or only the outputs
    are modified inplace after calling this function. It is the user's
    responsibility to ensure that is the case. If both the input and one or more
    of the outputs are modified inplace, gradients computed by autograd will be
    silently incorrect.
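
Example (a minimal sketch; the splitting itself behaves exactly like
:func:`torch.chunk`)::

    >>> torch.unsafe_chunk(torch.arange(4), 2)
    (tensor([0, 1]), tensor([2, 3]))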
  1597. """,
  1598. )
add_docstr(
    torch.unsafe_split,
    r"""
unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors

Works like :func:`torch.split` but without enforcing the autograd restrictions
on inplace modification of the outputs.

.. warning::

    This function is safe to use as long as only the input, or only the outputs
    are modified inplace after calling this function. It is the user's
    responsibility to ensure that is the case. If both the input and one or more
    of the outputs are modified inplace, gradients computed by autograd will be
    silently incorrect.
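
Example (a minimal sketch; the splitting itself behaves exactly like
:func:`torch.split`)::

    >>> torch.unsafe_split(torch.arange(6), 2)
    (tensor([0, 1]), tensor([2, 3]), tensor([4, 5]))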
  1611. """,
  1612. )
add_docstr(
    torch.hsplit,
    r"""
hsplit(input, indices_or_sections) -> List of Tensors

Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
horizontally according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.

If :attr:`input` is one dimensional this is equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=0)`` (the split dimension is
0), and if :attr:`input` has two or more dimensions it's equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=1)`` (the split dimension is 1),
except that if :attr:`indices_or_sections` is an integer it must evenly divide
the split dimension or a runtime error will be thrown.

This function is based on NumPy's :func:`numpy.hsplit`.

Args:
    input (Tensor): tensor to split.
    indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.

Example::

    >>> t = torch.arange(16.0).reshape(4, 4)
    >>> t
    tensor([[ 0.,  1.,  2.,  3.],
            [ 4.,  5.,  6.,  7.],
            [ 8.,  9., 10., 11.],
            [12., 13., 14., 15.]])
    >>> torch.hsplit(t, 2)
    (tensor([[ 0.,  1.],
            [ 4.,  5.],
            [ 8.,  9.],
            [12., 13.]]),
     tensor([[ 2.,  3.],
            [ 6.,  7.],
            [10., 11.],
            [14., 15.]]))
    >>> torch.hsplit(t, [3, 6])
    (tensor([[ 0.,  1.,  2.],
            [ 4.,  5.,  6.],
            [ 8.,  9., 10.],
            [12., 13., 14.]]),
     tensor([[ 3.],
            [ 7.],
            [11.],
            [15.]]),
     tensor([], size=(4, 0)))
""",
)
add_docstr(
    torch.vsplit,
    r"""
vsplit(input, indices_or_sections) -> List of Tensors

Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
vertically according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.

This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=0)``
(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.

This function is based on NumPy's :func:`numpy.vsplit`.

Args:
    input (Tensor): tensor to split.
    indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.

Example::

    >>> t = torch.arange(16.0).reshape(4, 4)
    >>> t
    tensor([[ 0.,  1.,  2.,  3.],
            [ 4.,  5.,  6.,  7.],
            [ 8.,  9., 10., 11.],
            [12., 13., 14., 15.]])
    >>> torch.vsplit(t, 2)
    (tensor([[0., 1., 2., 3.],
            [4., 5., 6., 7.]]),
     tensor([[ 8.,  9., 10., 11.],
            [12., 13., 14., 15.]]))
    >>> torch.vsplit(t, [3, 6])
    (tensor([[ 0.,  1.,  2.,  3.],
            [ 4.,  5.,  6.,  7.],
            [ 8.,  9., 10., 11.]]),
     tensor([[12., 13., 14., 15.]]),
     tensor([], size=(0, 4)))
""",
)
add_docstr(
    torch.dsplit,
    r"""
dsplit(input, indices_or_sections) -> List of Tensors

Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
depthwise according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.

This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=2)``
(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.

This function is based on NumPy's :func:`numpy.dsplit`.

Args:
    input (Tensor): tensor to split.
    indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.

Example::

    >>> t = torch.arange(16.0).reshape(2, 2, 4)
    >>> t
    tensor([[[ 0.,  1.,  2.,  3.],
             [ 4.,  5.,  6.,  7.]],

            [[ 8.,  9., 10., 11.],
             [12., 13., 14., 15.]]])
    >>> torch.dsplit(t, 2)
    (tensor([[[ 0.,  1.],
              [ 4.,  5.]],

             [[ 8.,  9.],
              [12., 13.]]]),
     tensor([[[ 2.,  3.],
              [ 6.,  7.]],

             [[10., 11.],
              [14., 15.]]]))
    >>> torch.dsplit(t, [3, 6])
    (tensor([[[ 0.,  1.,  2.],
              [ 4.,  5.,  6.]],

             [[ 8.,  9., 10.],
              [12., 13., 14.]]]),
     tensor([[[ 3.],
              [ 7.]],

             [[11.],
              [15.]]]),
     tensor([], size=(2, 2, 0)))
""",
)
add_docstr(
    torch.can_cast,
    r"""
can_cast(from, to) -> bool

Determines if a type conversion is allowed under PyTorch casting rules
described in the type promotion :ref:`documentation <type-promotion-doc>`.

Args:
    from (dtype): The original :class:`torch.dtype`.
    to (dtype): The target :class:`torch.dtype`.

Example::

    >>> torch.can_cast(torch.double, torch.float)
    True
    >>> torch.can_cast(torch.float, torch.int)
    False
""",
)
add_docstr(
    torch.corrcoef,
    r"""
corrcoef(input) -> Tensor

Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
where rows are the variables and columns are the observations.

.. note::

    The correlation coefficient matrix R is computed using the covariance matrix C as given by
    :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`

.. note::

    Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
    The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.

Args:
    input (Tensor): A 2D matrix containing multiple variables and observations, or a
        Scalar or 1D vector representing a single variable.

Returns:
    (Tensor) The correlation coefficient matrix of the variables.

.. seealso::

    :func:`torch.cov` covariance matrix.

Example::

    >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
    >>> torch.corrcoef(x)
    tensor([[ 1., -1.],
            [-1.,  1.]])
    >>> x = torch.randn(2, 4)
    >>> x
    tensor([[-0.2678, -0.0908, -0.3766,  0.2780],
            [-0.5812,  0.1535,  0.2387,  0.2350]])
    >>> torch.corrcoef(x)
    tensor([[1.0000, 0.3582],
            [0.3582, 1.0000]])
    >>> torch.corrcoef(x[0])
    tensor(1.)
""",
)
add_docstr(
    torch.cov,
    r"""
cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor

Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
the variables and columns are the observations.

A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
a single variable (Scalar or 1D) then its variance is returned.

The unbiased sample covariance of the variables :math:`x` and :math:`y` is given by:

.. math::
    \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{N~-~1}

where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively.

If :attr:`fweights` and/or :attr:`aweights` are provided, the unbiased weighted covariance
is calculated, which is given by:

.. math::
    \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\sum^{N}_{i = 1}w_i~-~1}

where :math:`w` denotes :attr:`fweights` or :attr:`aweights` based on whichever is provided, or
:math:`w = fweights \times aweights` if both are provided, and
:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable.

Args:
    input (Tensor): A 2D matrix containing multiple variables and observations, or a
        Scalar or 1D vector representing a single variable.

Keyword Args:
    correction (int, optional): difference between the sample size and sample degrees of freedom.
        Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate,
        even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
        will return the simple average.
    fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
        times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
        Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
    aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
        These relative weights are typically large for observations considered “important” and smaller for
        observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
        Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.

Returns:
    (Tensor) The covariance matrix of the variables.

.. seealso::

    :func:`torch.corrcoef` normalized covariance matrix.

Example::

    >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
    >>> x
    tensor([[0, 1, 2],
            [2, 1, 0]])
    >>> torch.cov(x)
    tensor([[ 1., -1.],
            [-1.,  1.]])
    >>> torch.cov(x, correction=0)
    tensor([[ 0.6667, -0.6667],
            [-0.6667,  0.6667]])
    >>> fw = torch.randint(1, 10, (3,))
    >>> fw
    tensor([1, 6, 9])
    >>> aw = torch.rand(3)
    >>> aw
    tensor([0.4282, 0.0255, 0.4144])
    >>> torch.cov(x, fweights=fw, aweights=aw)
    tensor([[ 0.4169, -0.4169],
            [-0.4169,  0.4169]])
""",
)
add_docstr(
    torch.cat,
    r"""
cat(tensors, dim=0, *, out=None) -> Tensor

Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be empty.

:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`.

:func:`torch.cat` can be best understood via examples.

Args:
    tensors (sequence of Tensors): any python sequence of tensors of the same type.
        Non-empty tensors provided must have the same shape, except in the
        cat dimension.
    dim (int, optional): the dimension over which the tensors are concatenated

Keyword args:
    {out}

Example::

    >>> x = torch.randn(2, 3)
    >>> x
    tensor([[ 0.6580, -1.0969, -0.4614],
            [-0.1034, -0.5790,  0.1497]])
    >>> torch.cat((x, x, x), 0)
    tensor([[ 0.6580, -1.0969, -0.4614],
            [-0.1034, -0.5790,  0.1497],
            [ 0.6580, -1.0969, -0.4614],
            [-0.1034, -0.5790,  0.1497],
            [ 0.6580, -1.0969, -0.4614],
            [-0.1034, -0.5790,  0.1497]])
    >>> torch.cat((x, x, x), 1)
    tensor([[ 0.6580, -1.0969, -0.4614,  0.6580, -1.0969, -0.4614,  0.6580,
             -1.0969, -0.4614],
            [-0.1034, -0.5790,  0.1497, -0.1034, -0.5790,  0.1497, -0.1034,
             -0.5790,  0.1497]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.concat,
    r"""
concat(tensors, dim=0, *, out=None) -> Tensor

Alias of :func:`torch.cat`.
""",
)
add_docstr(
    torch.concatenate,
    r"""
concatenate(tensors, axis=0, out=None) -> Tensor

Alias of :func:`torch.cat`.
""",
)
add_docstr(
    torch.ceil,
    r"""
ceil(input, *, out=None) -> Tensor

Returns a new tensor with the ceil of the elements of :attr:`input`,
the smallest integer greater than or equal to each element.

For integer inputs, follows the array-api convention of returning a
copy of the input tensor.

.. math::
    \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.6341, -1.4208, -1.0900,  0.5826])
    >>> torch.ceil(a)
    tensor([-0., -1., -1.,  1.])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.real,
    r"""
real(input) -> Tensor

Returns a new tensor containing real values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

Args:
    {input}

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.real
    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.imag,
    r"""
imag(input) -> Tensor

Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

.. warning::
    :func:`imag` is only supported for tensors with complex dtypes.

Args:
    {input}

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.imag
    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.view_as_real,
    r"""
view_as_real(input) -> Tensor

Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
represents the real and imaginary components of complex numbers.

.. warning::
    :func:`view_as_real` is only supported for tensors with ``complex dtypes``.

Args:
    {input}

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
    >>> torch.view_as_real(x)
    tensor([[ 0.4737, -0.3839],
            [-0.2098, -0.6699],
            [ 0.3470, -0.9451],
            [-0.5174, -1.3136]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.view_as_complex,
    r"""
view_as_complex(input) -> Tensor

Returns a view of :attr:`input` as a complex tensor. For an input complex
tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
dimension of the input tensor is expected to represent the real and imaginary
components of complex numbers.

.. warning::
    :func:`view_as_complex` is only supported for tensors with
    :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
    expected to have the last dimension of :attr:`size` 2. In addition, the
    tensor must have a `stride` of 1 for its last dimension. The strides of all
    other dimensions must be even numbers.

Args:
    {input}

Example::

    >>> x = torch.randn(4, 2)
    >>> x
    tensor([[ 1.6116, -0.5772],
            [-1.4606, -0.9120],
            [ 0.0786, -1.7497],
            [-0.6561, -1.6623]])
    >>> torch.view_as_complex(x)
    tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.reciprocal,
    r"""
reciprocal(input, *, out=None) -> Tensor

Returns a new tensor with the reciprocal of the elements of :attr:`input`

.. math::
    \text{out}_{i} = \frac{1}{\text{input}_{i}}

.. note::
    Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
    inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
    the default scalar type.
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.4595, -2.1219, -1.4314,  0.7298])
    >>> torch.reciprocal(a)
    tensor([-2.1763, -0.4713, -0.6986,  1.3702])
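
As the note above says, integral inputs are promoted to the default scalar
type (a hand-checkable sketch)::

    >>> torch.reciprocal(torch.tensor([2, 4]))
    tensor([0.5000, 0.2500])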
  2040. """.format(
  2041. **common_args
  2042. ),
  2043. )
add_docstr(
    torch.cholesky,
    r"""
cholesky(input, upper=False, *, out=None) -> Tensor

Computes the Cholesky decomposition of a symmetric positive-definite
matrix :math:`A` or for batches of symmetric positive-definite matrices.

If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
the decomposition has the form:

.. math::
    A = U^TU

If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
the decomposition has the form:

.. math::
    A = LL^T

If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
tensor will be composed of lower-triangular Cholesky factors of each of the individual
matrices.

.. warning::

    :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
    and will be removed in a future PyTorch release.

    ``L = torch.cholesky(A)`` should be replaced with

    .. code:: python

        L = torch.linalg.cholesky(A)

    ``U = torch.cholesky(A, upper=True)`` should be replaced with

    .. code:: python

        U = torch.linalg.cholesky(A).mH

    This transform will produce equivalent results for all valid (symmetric positive definite) inputs.

Args:
    input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
        batch dimensions consisting of symmetric positive-definite matrices.
    upper (bool, optional): flag that indicates whether to return an
        upper or lower triangular matrix. Default: ``False``

Keyword args:
    out (Tensor, optional): the output matrix

Example::

    >>> a = torch.randn(3, 3)
    >>> a = a @ a.mT + 1e-3  # make symmetric positive-definite
    >>> l = torch.cholesky(a)
    >>> a
    tensor([[ 2.4112, -0.7486,  1.4551],
            [-0.7486,  1.3544,  0.1294],
            [ 1.4551,  0.1294,  1.6724]])
    >>> l
    tensor([[ 1.5528,  0.0000,  0.0000],
            [-0.4821,  1.0592,  0.0000],
            [ 0.9371,  0.5487,  0.7023]])
    >>> l @ l.mT
    tensor([[ 2.4112, -0.7486,  1.4551],
            [-0.7486,  1.3544,  0.1294],
            [ 1.4551,  0.1294,  1.6724]])
    >>> a = torch.randn(3, 2, 2)  # Example for batched input
    >>> a = a @ a.mT + 1e-03  # make symmetric positive-definite
    >>> l = torch.cholesky(a)
    >>> z = l @ l.mT
    >>> torch.dist(z, a)
    tensor(2.3842e-07)
""",
)
add_docstr(
    torch.cholesky_solve,
    r"""
cholesky_solve(input, input2, upper=False, *, out=None) -> Tensor

Solves a linear system of equations with a positive semidefinite
matrix to be inverted given its Cholesky factor matrix :math:`u`.

If :attr:`upper` is ``False``, :math:`u` is lower triangular and `c` is
returned such that:

.. math::
    c = (u u^T)^{-1} b

If :attr:`upper` is ``True``, :math:`u` is upper triangular
and `c` is returned such that:

.. math::
    c = (u^T u)^{-1} b

`torch.cholesky_solve(b, u)` can take in 2D inputs `b, u` or inputs that are
batches of 2D matrices. If the inputs are batches, then batched
outputs `c` are returned.

Supports real-valued and complex-valued inputs.
For the complex-valued inputs the transpose operator above is the conjugate transpose.

Args:
    input (Tensor): input matrix :math:`b` of size :math:`(*, m, k)`,
        where :math:`*` is zero or more batch dimensions
    input2 (Tensor): input matrix :math:`u` of size :math:`(*, m, m)`,
        where :math:`*` is zero or more batch dimensions composed of
        upper or lower triangular Cholesky factor
    upper (bool, optional): whether to consider the Cholesky factor as a
        lower or upper triangular matrix. Default: ``False``.

Keyword args:
    out (Tensor, optional): the output tensor for `c`

Example::

    >>> a = torch.randn(3, 3)
    >>> a = torch.mm(a, a.t())  # make symmetric positive definite
    >>> u = torch.linalg.cholesky(a)
    >>> a
    tensor([[ 0.7747, -1.9549,  1.3086],
            [-1.9549,  6.7546, -5.4114],
            [ 1.3086, -5.4114,  4.8733]])
    >>> b = torch.randn(3, 2)
    >>> b
    tensor([[-0.6355,  0.9891],
            [ 0.1974,  1.4706],
            [-0.4115, -0.6225]])
    >>> torch.cholesky_solve(b, u)
    tensor([[ -8.1625,  19.6097],
            [ -5.8398,  14.2387],
            [ -4.3771,  10.4173]])
    >>> torch.mm(a.inverse(), b)
    tensor([[ -8.1626,  19.6097],
            [ -5.8398,  14.2387],
            [ -4.3771,  10.4173]])
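
The same solution can also be obtained from the upper-triangular factor (a
sketch reusing ``b`` and ``u`` from above; ``u.mT`` is the upper factor here
because the inputs are real, so the transpose needs no conjugation)::

    >>> c = torch.cholesky_solve(b, u.mT, upper=True)
    >>> torch.allclose(c, torch.cholesky_solve(b, u))
    True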
  2154. """,
  2155. )
add_docstr(
    torch.cholesky_inverse,
    r"""
cholesky_inverse(input, upper=False, *, out=None) -> Tensor

Computes the inverse of a symmetric positive-definite matrix :math:`A` using its
Cholesky factor :math:`u`: returns matrix ``inv``. The inverse is computed using
LAPACK routines ``dpotri`` and ``spotri`` (and the corresponding MAGMA routines).

If :attr:`upper` is ``False``, :math:`u` is lower triangular
such that the returned tensor is

.. math::
    inv = (uu^{T})^{-1}

If :attr:`upper` is ``True``, :math:`u` is upper
triangular such that the returned tensor is

.. math::
    inv = (u^T u)^{-1}

Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :math:`A` is a batch of matrices then the output has the same batch dimensions.

Args:
    input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)`,
        consisting of symmetric positive-definite matrices
        where :math:`*` is zero or more batch dimensions.
    upper (bool, optional): flag that indicates whether to return an
        upper or lower triangular matrix. Default: ``False``

Keyword args:
    out (Tensor, optional): the output tensor for `inv`

Example::

    >>> a = torch.randn(3, 3)
    >>> a = torch.mm(a, a.t()) + 1e-05 * torch.eye(3)  # make symmetric positive definite
    >>> u = torch.linalg.cholesky(a)
    >>> a
    tensor([[  0.9935,  -0.6353,   1.5806],
            [ -0.6353,   0.8769,  -1.7183],
            [  1.5806,  -1.7183,  10.6618]])
    >>> torch.cholesky_inverse(u)
    tensor([[ 1.9314,  1.2251, -0.0889],
            [ 1.2251,  2.4439,  0.2122],
            [-0.0889,  0.2122,  0.1412]])
    >>> a.inverse()
    tensor([[ 1.9314,  1.2251, -0.0889],
            [ 1.2251,  2.4439,  0.2122],
            [-0.0889,  0.2122,  0.1412]])
    >>> a = torch.randn(3, 2, 2)  # Example for batched input
    >>> a = a @ a.mT + 1e-03  # make symmetric positive-definite
    >>> l = torch.linalg.cholesky(a)
    >>> z = l @ l.mT
    >>> torch.dist(z, a)
    tensor(3.5894e-07)
""",
)
add_docstr(
    torch.clone,
    r"""
clone(input, *, memory_format=torch.preserve_format) -> Tensor

Returns a copy of :attr:`input`.

.. note::

    This function is differentiable, so gradients will flow back from the
    result of this operation to :attr:`input`. To create a tensor without an
    autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.

Args:
    {input}

Keyword args:
    {memory_format}
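
Example (a minimal sketch: the copy owns its own memory, so modifying it
leaves the original untouched)::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.clone(a)
    >>> b[0] = 9
    >>> a
    tensor([1, 2, 3])
    >>> b
    tensor([9, 2, 3])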
  2218. """.format(
  2219. **common_args
  2220. ),
  2221. )
add_docstr(
    torch.clamp,
    r"""
clamp(input, min=None, max=None, *, out=None) -> Tensor

Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
Letting ``min_value`` and ``max_value`` be :attr:`min` and :attr:`max`, respectively, this returns:

.. math::
    y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)

If :attr:`min` is ``None``, there is no lower bound.
Or, if :attr:`max` is ``None`` there is no upper bound.
"""
    + r"""

.. note::
    If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) <torch.clamp>`
    sets all elements in :attr:`input` to the value of :attr:`max`.

Args:
    {input}
    min (Number or Tensor, optional): lower-bound of the range to be clamped to
    max (Number or Tensor, optional): upper-bound of the range to be clamped to

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-1.7120,  0.1734, -0.0478, -0.0922])
    >>> torch.clamp(a, min=-0.5, max=0.5)
    tensor([-0.5000,  0.1734, -0.0478, -0.0922])

    >>> min = torch.linspace(-1, 1, steps=4)
    >>> torch.clamp(a, min=min)
    tensor([-1.0000,  0.1734,  0.3333,  1.0000])
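
As the note above says, when ``min`` exceeds ``max`` every element is set to
``max`` (a hand-checkable sketch)::

    >>> torch.clamp(torch.tensor([0., 5.]), min=3, max=1)
    tensor([1., 1.])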
  2252. """.format(
  2253. **common_args
  2254. ),
  2255. )
add_docstr(
    torch.clip,
    r"""
clip(input, min=None, max=None, *, out=None) -> Tensor

Alias for :func:`torch.clamp`.
""",
)
add_docstr(
    torch.column_stack,
    r"""
column_stack(tensors, *, out=None) -> Tensor

Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.

Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.column_stack((a, b))
    tensor([[1, 4],
            [2, 5],
            [3, 6]])
    >>> a = torch.arange(5)
    >>> b = torch.arange(10).reshape(5, 2)
    >>> torch.column_stack((a, b, b))
    tensor([[0, 0, 1, 0, 1],
            [1, 2, 3, 2, 3],
            [2, 4, 5, 4, 5],
            [3, 6, 7, 6, 7],
            [4, 8, 9, 8, 9]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.complex,
    r"""
complex(real, imag, *, out=None) -> Tensor

Constructs a complex tensor with its real part equal to :attr:`real` and its
imaginary part equal to :attr:`imag`.

Args:
    real (Tensor): The real part of the complex tensor. Must be float or double.
    imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
        as :attr:`real`.

Keyword args:
    out (Tensor): If the inputs are ``torch.float32``, must be
        ``torch.complex64``. If the inputs are ``torch.float64``, must be
        ``torch.complex128``.

Example::

    >>> real = torch.tensor([1, 2], dtype=torch.float32)
    >>> imag = torch.tensor([3, 4], dtype=torch.float32)
    >>> z = torch.complex(real, imag)
    >>> z
    tensor([(1.+3.j), (2.+4.j)])
    >>> z.dtype
    torch.complex64
""",
)
add_docstr(
    torch.polar,
    r"""
polar(abs, angle, *, out=None) -> Tensor

Constructs a complex tensor whose elements are Cartesian coordinates
corresponding to the polar coordinates with absolute value :attr:`abs` and angle
:attr:`angle`.

.. math::
    \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j

.. note::
    `torch.polar` is similar to
    `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
    and does not compute the polar decomposition
    of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
    The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
    infinite.
"""
    + r"""

Args:
    abs (Tensor): The absolute value of the complex tensor. Must be float or double.
    angle (Tensor): The angle of the complex tensor. Must be same dtype as
        :attr:`abs`.

Keyword args:
    out (Tensor): If the inputs are ``torch.float32``, must be
        ``torch.complex64``. If the inputs are ``torch.float64``, must be
        ``torch.complex128``.

Example::

    >>> import numpy as np
    >>> abs = torch.tensor([1, 2], dtype=torch.float64)
    >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
    >>> z = torch.polar(abs, angle)
    >>> z
    tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
""",
)
add_docstr(
    torch.conj_physical,
    r"""
conj_physical(input, *, out=None) -> Tensor

Computes the element-wise conjugate of the given :attr:`input` tensor.
If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.

.. note::
    This performs the conjugate operation regardless of whether the conjugate bit is set.

.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
    non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
    when :attr:`input` is of non-complex dtype to be compatible with this change.

.. math::
    \text{out}_{i} = conj(\text{input}_{i})
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
    tensor([-1 - 1j, -2 - 2j, 3 + 3j])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.conj,
    r"""
conj(input) -> Tensor

Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
this function just returns :attr:`input`.

.. note::
    :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
    at any time using :func:`torch.resolve_conj`.

.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
    non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
    when :attr:`input` is of non-complex dtype to be compatible with this change.

Args:
    {input}

Example::

    >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
    >>> x.is_conj()
    False
    >>> y = torch.conj(x)
    >>> y.is_conj()
    True
""".format(
        **common_args
    ),
)
add_docstr(
    torch.resolve_conj,
    r"""
resolve_conj(input) -> Tensor

Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.

Args:
    {input}

Example::

    >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
    >>> y = x.conj()
    >>> y.is_conj()
    True
    >>> z = y.resolve_conj()
    >>> z
    tensor([-1 - 1j, -2 - 2j, 3 + 3j])
    >>> z.is_conj()
    False
""".format(
        **common_args
    ),
)
add_docstr(
    torch.resolve_neg,
    r"""
resolve_neg(input) -> Tensor

Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.

Args:
    {input}

Example::

    >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
    >>> y = x.conj()
    >>> z = y.imag
    >>> z.is_neg()
    True
    >>> out = z.resolve_neg()
    >>> out
    tensor([-1., -2., 3.])
    >>> out.is_neg()
    False
""".format(
        **common_args
    ),
)
add_docstr(
    torch.copysign,
    r"""
copysign(input, other, *, out=None) -> Tensor

Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.

.. math::
    \text{out}_{i} = \begin{cases}
        -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
         |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
    \end{cases}
"""
    + r"""

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
and integer and float inputs.

Args:
    input (Tensor): magnitudes.
    other (Tensor or Number): contains value(s) whose signbit(s) are
        applied to the magnitudes in :attr:`input`.

Keyword args:
    {out}

Example::

    >>> a = torch.randn(5)
    >>> a
    tensor([-1.2557, -0.0026, -0.5387,  0.4740, -0.9244])
    >>> torch.copysign(a, 1)
    tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.7079,  0.2778, -1.0249,  0.5719],
            [-0.0059, -0.2600, -0.4475, -1.3948],
            [ 0.3667, -0.9567, -2.5757, -0.1751],
            [ 0.2046, -0.0742,  0.2998, -0.1054]])
    >>> b = torch.randn(4)
    >>> b
    tensor([ 0.2373,  0.3120,  0.3190, -1.1128])
    >>> torch.copysign(a, b)
    tensor([[ 0.7079,  0.2778,  1.0249, -0.5719],
            [ 0.0059,  0.2600,  0.4475, -1.3948],
            [ 0.3667,  0.9567,  2.5757, -0.1751],
            [ 0.2046,  0.0742,  0.2998, -0.1054]])
    >>> a = torch.tensor([1.])
    >>> b = torch.tensor([-0.])
    >>> torch.copysign(a, b)
    tensor([-1.])

.. note::
    copysign handles signed zeros. If the other argument has a negative zero (-0),
    the corresponding output value will be negative.
""".format(
        **common_args
    ),
)
add_docstr(
    torch.cos,
    r"""
cos(input, *, out=None) -> Tensor

Returns a new tensor with the cosine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \cos(\text{input}_{i})
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 1.4309,  1.2706, -0.8562,  0.9796])
    >>> torch.cos(a)
    tensor([ 0.1395,  0.2957,  0.6553,  0.5574])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.cosh,
    r"""
cosh(input, *, out=None) -> Tensor

Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.

.. math::
    \text{out}_{i} = \cosh(\text{input}_{i})
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.1632,  1.1835, -0.6979, -0.7325])
    >>> torch.cosh(a)
    tensor([ 1.0133,  1.7860,  1.2536,  1.2805])

.. note::
    When :attr:`input` is on the CPU, the implementation of torch.cosh may use
    the Sleef library, which rounds very large results to infinity or negative
    infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(
        **common_args
    ),
)
add_docstr(
    torch.cross,
    r"""
cross(input, other, dim=None, *, out=None) -> Tensor

Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
and :attr:`other`.

Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
of vectors, for which it computes the product along the dimension :attr:`dim`.
In this case, the output has the same batch dimensions as the inputs.

If :attr:`dim` is not given, it defaults to the first dimension found with
size 3. Note that this might be unexpected.

.. seealso::
    :func:`torch.linalg.cross` which requires specifying dim (defaulting to -1).

.. warning:: This function may change in a future PyTorch release to match
    the default behaviour in :func:`torch.linalg.cross`. We recommend using
    :func:`torch.linalg.cross`.

Args:
    {input}
    other (Tensor): the second input tensor
    dim (int, optional): the dimension to take the cross-product in.

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 3)
    >>> a
    tensor([[-0.3956,  1.1455,  1.6895],
            [-0.5849,  1.3672,  0.3599],
            [-1.1626,  0.7180, -0.0521],
            [-0.1339,  0.9902, -2.0225]])
    >>> b = torch.randn(4, 3)
    >>> b
    tensor([[-0.0257, -1.4725, -1.2251],
            [-1.1479, -0.7005, -1.9757],
            [-1.3904,  0.3726, -1.1836],
            [-0.9688, -0.7153,  0.2159]])
    >>> torch.cross(a, b, dim=1)
    tensor([[ 1.0844, -0.5281,  0.6120],
            [-2.4490, -1.5687,  1.9792],
            [-0.8304, -1.3037,  0.5650],
            [-1.2329,  1.9883,  1.0551]])
    >>> torch.cross(a, b)
    tensor([[ 1.0844, -0.5281,  0.6120],
            [-2.4490, -1.5687,  1.9792],
            [-0.8304, -1.3037,  0.5650],
            [-1.2329,  1.9883,  1.0551]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.logcumsumexp,
    r"""
logcumsumexp(input, dim, *, out=None) -> Tensor

Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.

For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is

.. math::
    \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})

Args:
    {input}
    dim (int): the dimension to do the operation over

Keyword args:
    {out}

Example::

    >>> a = torch.randn(10)
    >>> torch.logcumsumexp(a, dim=0)
    tensor([-0.42296738, -0.04462666,  0.86278635,  0.94622083,  1.05277811,
             1.39202815,  1.83525007,  1.84492621,  2.06084887,  2.06844475])
""".format(
        **reduceops_common_args
    ),
)
add_docstr(
    torch.cummax,
    r"""
cummax(input, dim, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.

.. math::
    y_i = max(x_1, x_2, x_3, \dots, x_i)

Args:
    {input}
    dim (int): the dimension to do the operation over

Keyword args:
    out (tuple, optional): the result tuple of two output tensors (values, indices)

Example::

    >>> a = torch.randn(10)
    >>> a
    tensor([-0.3449, -1.5447,  0.0685, -1.5104, -1.1706,  0.2259,  1.4696, -1.3284,
             1.9946, -0.8209])
    >>> torch.cummax(a, dim=0)
    torch.return_types.cummax(
        values=tensor([-0.3449, -0.3449,  0.0685,  0.0685,  0.0685,  0.2259,  1.4696,  1.4696,
                        1.9946,  1.9946]),
        indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
""".format(
        **reduceops_common_args
    ),
)
add_docstr(
    torch.cummin,
    r"""
cummin(input, dim, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.

.. math::
    y_i = min(x_1, x_2, x_3, \dots, x_i)

Args:
    {input}
    dim (int): the dimension to do the operation over

Keyword args:
    out (tuple, optional): the result tuple of two output tensors (values, indices)

Example::

    >>> a = torch.randn(10)
    >>> a
    tensor([-0.2284, -0.6628,  0.0975,  0.2680, -1.3298, -0.4220, -0.3885,  1.1762,
             0.9165,  1.6684])
    >>> torch.cummin(a, dim=0)
    torch.return_types.cummin(
        values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
                       -1.3298, -1.3298]),
        indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
""".format(
        **reduceops_common_args
    ),
)
add_docstr(
    torch.cumprod,
    r"""
cumprod(input, dim, *, dtype=None, out=None) -> Tensor

Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.

For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.

.. math::
    y_i = x_1 \times x_2\times x_3\times \dots \times x_i

Args:
    {input}
    dim (int): the dimension to do the operation over

Keyword args:
    {dtype}
    {out}

Example::

    >>> a = torch.randn(10)
    >>> a
    tensor([ 0.6001,  0.2069, -0.1919,  0.9792,  0.6727,  1.0062,  0.4126,
            -0.2129, -0.4206,  0.1968])
    >>> torch.cumprod(a, dim=0)
    tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
             0.0014, -0.0006, -0.0001])

    >>> a[5] = 0.0
    >>> torch.cumprod(a, dim=0)
    tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
             0.0000, -0.0000, -0.0000])
""".format(
        **reduceops_common_args
    ),
)
add_docstr(
    torch.cumsum,
    r"""
cumsum(input, dim, *, dtype=None, out=None) -> Tensor

Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.

For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.

.. math::
    y_i = x_1 + x_2 + x_3 + \dots + x_i

Args:
    {input}
    dim (int): the dimension to do the operation over

Keyword args:
    {dtype}
    {out}

Example::

    >>> a = torch.randn(10)
    >>> a
    tensor([-0.8286, -0.4890,  0.5155,  0.8443,  0.1865, -0.1752, -2.0595,
             0.1850, -1.1571, -0.4243])
    >>> torch.cumsum(a, dim=0)
    tensor([-0.8286, -1.3175, -0.8020,  0.0423,  0.2289,  0.0537, -2.0058,
            -1.8209, -2.9780, -3.4022])
""".format(
        **reduceops_common_args
    ),
)
add_docstr(
    torch.count_nonzero,
    r"""
count_nonzero(input, dim=None) -> Tensor

Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.

Args:
    {input}
    dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.

Example::

    >>> x = torch.zeros(3, 3)
    >>> x[torch.randn(3, 3) > 0.5] = 1
    >>> x
    tensor([[0., 1., 1.],
            [0., 0., 0.],
            [0., 0., 1.]])
    >>> torch.count_nonzero(x)
    tensor(3)
    >>> torch.count_nonzero(x, dim=0)
    tensor([0, 1, 2])
""".format(
        **reduceops_common_args
    ),
)
add_docstr(
    torch.dequantize,
    r"""
dequantize(tensor) -> Tensor

Returns an fp32 Tensor by dequantizing a quantized Tensor.

Args:
    tensor (Tensor): A quantized Tensor

.. function:: dequantize(tensors) -> sequence of Tensors
   :noindex:

Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors

Args:
    tensors (sequence of Tensors): A list of quantized Tensors
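
Example (a small sketch, assuming a build with quantization support; the
scale and zero point are chosen so the round trip is exact)::

    >>> xq = torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0]), 0.1, 10, torch.quint8)
    >>> torch.dequantize(xq)
    tensor([-1.,  0.,  1.])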
  2773. """,
  2774. )
add_docstr(
    torch.diag,
    r"""
diag(input, diagonal=0, *, out=None) -> Tensor

- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
  with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
  the diagonal elements of :attr:`input`.

The argument :attr:`diagonal` controls which diagonal to consider:

- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.

Args:
    {input}
    diagonal (int, optional): the diagonal to consider

Keyword args:
    {out}

.. seealso::

    :func:`torch.diagonal` always returns the diagonal of its input.

    :func:`torch.diagflat` always constructs a tensor with diagonal elements
    specified by the input.

Examples:

Get the square matrix where the input vector is the diagonal::

    >>> a = torch.randn(3)
    >>> a
    tensor([ 0.5950, -0.0872,  2.3298])
    >>> torch.diag(a)
    tensor([[ 0.5950,  0.0000,  0.0000],
            [ 0.0000, -0.0872,  0.0000],
            [ 0.0000,  0.0000,  2.3298]])
    >>> torch.diag(a, 1)
    tensor([[ 0.0000,  0.5950,  0.0000,  0.0000],
            [ 0.0000,  0.0000, -0.0872,  0.0000],
            [ 0.0000,  0.0000,  0.0000,  2.3298],
            [ 0.0000,  0.0000,  0.0000,  0.0000]])

Get the k-th diagonal of a given matrix::

    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[-0.4264,  0.0255, -0.1064],
            [ 0.8795, -0.2429,  0.1374],
            [ 0.1029, -0.6482, -1.6300]])
    >>> torch.diag(a, 0)
    tensor([-0.4264, -0.2429, -1.6300])
    >>> torch.diag(a, 1)
    tensor([0.0255, 0.1374])
""".format(
        **common_args
    ),
)
  2824. add_docstr(
  2825. torch.diag_embed,
  2826. r"""
  2827. diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
  2828. Creates a tensor whose diagonals of certain 2D planes (specified by
  2829. :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
  2830. To facilitate creating batched diagonal matrices, the 2D planes formed by
  2831. the last two dimensions of the returned tensor are chosen by default.
  2832. The argument :attr:`offset` controls which diagonal to consider:
  2833. - If :attr:`offset` = 0, it is the main diagonal.
  2834. - If :attr:`offset` > 0, it is above the main diagonal.
  2835. - If :attr:`offset` < 0, it is below the main diagonal.
  2836. The size of the new matrix will be calculated to make the specified diagonal
  2837. of the size of the last input dimension.
  2838. Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
  2839. and :attr:`dim2` matters. Exchanging them is equivalent to changing the
  2840. sign of :attr:`offset`.
  2841. Applying :meth:`torch.diagonal` to the output of this function with
  2842. the same arguments yields a matrix identical to input. However,
  2843. :meth:`torch.diagonal` has different default dimensions, so those
  2844. need to be explicitly specified.
  2845. Args:
  2846. {input} Must be at least 1-dimensional.
  2847. offset (int, optional): which diagonal to consider. Default: 0
  2848. (main diagonal).
  2849. dim1 (int, optional): first dimension with respect to which to
  2850. take diagonal. Default: -2.
  2851. dim2 (int, optional): second dimension with respect to which to
  2852. take diagonal. Default: -1.
  2853. Example::
  2854. >>> a = torch.randn(2, 3)
  2855. >>> torch.diag_embed(a)
  2856. tensor([[[ 1.5410, 0.0000, 0.0000],
  2857. [ 0.0000, -0.2934, 0.0000],
  2858. [ 0.0000, 0.0000, -2.1788]],
  2859. [[ 0.5684, 0.0000, 0.0000],
  2860. [ 0.0000, -1.0845, 0.0000],
  2861. [ 0.0000, 0.0000, -1.3986]]])
  2862. >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
  2863. tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
  2864. [ 0.0000, 0.5684, 0.0000, 0.0000]],
  2865. [[ 0.0000, 0.0000, -0.2934, 0.0000],
  2866. [ 0.0000, 0.0000, -1.0845, 0.0000]],
  2867. [[ 0.0000, 0.0000, 0.0000, -2.1788],
  2868. [ 0.0000, 0.0000, 0.0000, -1.3986]],
  2869. [[ 0.0000, 0.0000, 0.0000, 0.0000],
  2870. [ 0.0000, 0.0000, 0.0000, 0.0000]]])
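
A quick shape check of the round trip described above (the embedded
diagonal can be read back with :func:`torch.diagonal`)::

    >>> torch.diagonal(torch.diag_embed(a), dim1=-2, dim2=-1).shape
    torch.Size([2, 3])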
""".format(**common_args),
)

add_docstr(
    torch.diagflat,
    r"""
diagflat(input, offset=0) -> Tensor

- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
  with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a tensor with more than one dimension, then returns a
  2-D tensor with diagonal elements equal to a flattened :attr:`input`.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

Args:
    {input}
    offset (int, optional): the diagonal to consider. Default: 0 (main
        diagonal).

Examples::

    >>> a = torch.randn(3)
    >>> a
    tensor([-0.2956, -0.9068, 0.1695])
    >>> torch.diagflat(a)
    tensor([[-0.2956, 0.0000, 0.0000],
            [ 0.0000, -0.9068, 0.0000],
            [ 0.0000, 0.0000, 0.1695]])
    >>> torch.diagflat(a, 1)
    tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
            [ 0.0000, 0.0000, -0.9068, 0.0000],
            [ 0.0000, 0.0000, 0.0000, 0.1695],
            [ 0.0000, 0.0000, 0.0000, 0.0000]])
    >>> a = torch.randn(2, 2)
    >>> a
    tensor([[ 0.2094, -0.3018],
            [-0.1516, 1.9342]])
    >>> torch.diagflat(a)
    tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
            [ 0.0000, -0.3018, 0.0000, 0.0000],
            [ 0.0000, 0.0000, -0.1516, 0.0000],
            [ 0.0000, 0.0000, 0.0000, 1.9342]])
""".format(**common_args),
)

add_docstr(
    torch.diagonal,
    r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor

Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.

Args:
    {input} Must be at least 2-dimensional.
    offset (int, optional): which diagonal to consider. Default: 0
        (main diagonal).
    dim1 (int, optional): first dimension with respect to which to
        take diagonal. Default: 0.
    dim2 (int, optional): second dimension with respect to which to
        take diagonal. Default: 1.

.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.

Examples::

    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[-1.0854, 1.1431, -0.1752],
            [ 0.8536, -0.0905, 0.0360],
            [ 0.6927, -0.3735, -0.4945]])
    >>> torch.diagonal(a, 0)
    tensor([-1.0854, -0.0905, -0.4945])
    >>> torch.diagonal(a, 1)
    tensor([ 1.1431, 0.0360])
    >>> x = torch.randn(2, 5, 4, 2)
    >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
    tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
             [-1.1065, 1.0401, -0.2235, -0.7938]],

            [[-1.7325, -0.3081, 0.6166, 0.2335],
             [ 1.0500, 0.7336, -0.3836, -1.1015]]])
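
The diagonal is appended as the last dimension, so the batched call above
produces this shape::

    >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2).shape
    torch.Size([2, 2, 4])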
""".format(**common_args),
)

add_docstr(
    torch.diagonal_scatter,
    r"""
diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor

Embeds the values of the :attr:`src` tensor into :attr:`input` along
the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
and :attr:`dim2`.

This function returns a tensor with fresh storage; it does not
return a view.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

Args:
    {input} Must be at least 2-dimensional.
    src (Tensor): the tensor to embed into :attr:`input`.
    offset (int, optional): which diagonal to consider. Default: 0
        (main diagonal).
    dim1 (int, optional): first dimension with respect to which to
        take diagonal. Default: 0.
    dim2 (int, optional): second dimension with respect to which to
        take diagonal. Default: 1.

.. note::

    :attr:`src` must be of the proper size in order to be embedded
    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.diagonal(input, offset, dim1, dim2)``.

Examples::

    >>> a = torch.zeros(3, 3)
    >>> a
    tensor([[0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> torch.diagonal_scatter(a, torch.ones(3), 0)
    tensor([[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]])
    >>> torch.diagonal_scatter(a, torch.ones(2), 1)
    tensor([[0., 1., 0.],
            [0., 0., 1.],
            [0., 0., 0.]])
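
A negative offset scatters below the main diagonal (same ``a`` as above)::

    >>> torch.diagonal_scatter(a, torch.ones(2), -1)
    tensor([[0., 0., 0.],
            [1., 0., 0.],
            [0., 1., 0.]])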
""".format(**common_args),
)

add_docstr(
    torch.as_strided_scatter,
    r"""
as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor

Embeds the values of the :attr:`src` tensor into :attr:`input` along
the elements corresponding to the result of calling
``input.as_strided(size, stride, storage_offset)``.

This function returns a tensor with fresh storage; it does not
return a view.

Args:
    {input}
    src (Tensor): the tensor to embed into :attr:`input`
    size (tuple of ints): the shape of the output tensor
    stride (tuple of ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor

.. note::

    :attr:`src` must be of the proper size in order to be embedded
    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.as_strided(input, size, stride, storage_offset)``.

Example::

    >>> a = torch.arange(4).reshape(2, 2) + 1
    >>> a
    tensor([[1, 2],
            [3, 4]])
    >>> b = torch.zeros(3, 3)
    >>> b
    tensor([[0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
    tensor([[1., 3., 2.],
            [4., 0., 0.],
            [0., 0., 0.]])
""".format(**common_args),
)

add_docstr(
    torch.diff,
    r"""
diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor

Computes the n-th forward difference along the given dimension.

The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
differences are calculated by using :func:`torch.diff` recursively.

Args:
    input (Tensor): the tensor to compute the differences on
    n (int, optional): the number of times to recursively compute the difference
    dim (int, optional): the dimension to compute the difference along.
        Default is the last dimension.
    prepend, append (Tensor, optional): values to prepend or append to
        :attr:`input` along :attr:`dim` before computing the difference.
        Their dimensions must be equivalent to that of input, and their shapes
        must match input's shape except on :attr:`dim`.

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 3, 2])
    >>> torch.diff(a)
    tensor([ 2, -1])
    >>> b = torch.tensor([4, 5])
    >>> torch.diff(a, append=b)
    tensor([ 2, -1, 2, 1])
    >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
    >>> torch.diff(c, dim=0)
    tensor([[2, 2, 2]])
    >>> torch.diff(c, dim=1)
    tensor([[1, 1],
            [1, 1]])
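
A higher-order difference applies the first-order difference repeatedly;
with ``a`` as above::

    >>> torch.diff(a, n=2)
    tensor([-3])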
""".format(**common_args),
)

add_docstr(
    torch.digamma,
    r"""
digamma(input, *, out=None) -> Tensor

Alias for :func:`torch.special.digamma`.
""",
)

add_docstr(
    torch.dist,
    r"""
dist(input, other, p=2) -> Tensor

Returns the p-norm of (:attr:`input` - :attr:`other`).

The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    {input}
    other (Tensor): the right-hand-side input tensor
    p (float, optional): the norm to be computed

Example::

    >>> x = torch.randn(4)
    >>> x
    tensor([-1.5393, -0.8675, 0.5916, 1.6321])
    >>> y = torch.randn(4)
    >>> y
    tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
    >>> torch.dist(x, y, 3.5)
    tensor(1.6727)
    >>> torch.dist(x, y, 3)
    tensor(1.6973)
    >>> torch.dist(x, y, 0)
    tensor(4.)
    >>> torch.dist(x, y, 1)
    tensor(2.6537)
""".format(**common_args),
)

add_docstr(
    torch.div,
    r"""
div(input, other, *, rounding_mode=None, out=None) -> Tensor

Divides each element of :attr:`input` by the corresponding element of
:attr:`other`.

.. math::
    \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}

.. note::

    By default, this performs a "true" division like Python 3.
    See the :attr:`rounding_mode` argument for floor division.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Always promotes integer types to the default scalar type.

Args:
    input (Tensor): the dividend
    other (Tensor or Number): the divisor

Keyword args:
    rounding_mode (str, optional): Type of rounding applied to the result:

        * None - default behavior. Performs no rounding and, if both :attr:`input` and
          :attr:`other` are integer types, promotes the inputs to the default scalar type.
          Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
        * ``"trunc"`` - rounds the results of the division towards zero.
          Equivalent to C-style integer division.
        * ``"floor"`` - rounds the results of the division down.
          Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.

    {out}

Examples::

    >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
    >>> torch.div(x, 0.5)
    tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])

    >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
    ...                   [ 0.1815, -1.0111, 0.9805, -1.5923],
    ...                   [ 0.1062, 1.4581, 0.7759, -1.2344],
    ...                   [-0.1830, -0.0313, 1.1908, -1.4757]])
    >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
    >>> torch.div(a, b)
    tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
            [ 0.2260, -3.4509, -1.2086, 6.8990],
            [ 0.1322, 4.9764, -0.9564, 5.3484],
            [-0.2278, -0.1068, -1.4678, 6.3938]])

    >>> torch.div(a, b, rounding_mode='trunc')
    tensor([[-0., -6., 0., 1.],
            [ 0., -3., -1., 6.],
            [ 0., 4., -0., 5.],
            [-0., -0., -1., 6.]])

    >>> torch.div(a, b, rounding_mode='floor')
    tensor([[-1., -7., 0., 1.],
            [ 0., -4., -2., 6.],
            [ 0., 4., -1., 5.],
            [-1., -1., -2., 6.]])
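
Because integer inputs are promoted, true division of integer tensors
yields a floating point result::

    >>> torch.div(torch.tensor([5]), torch.tensor([2]))
    tensor([2.5000])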
""".format(**common_args),
)

add_docstr(
    torch.divide,
    r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor

Alias for :func:`torch.div`.
""",
)

add_docstr(
    torch.dot,
    r"""
dot(input, other, *, out=None) -> Tensor

Computes the dot product of two 1D tensors.

.. note::

    Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
    of two 1D tensors with the same number of elements.

Args:
    input (Tensor): first tensor in the dot product, must be 1D.
    other (Tensor): second tensor in the dot product, must be 1D.

Keyword args:
    {out}

Example::

    >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
    tensor(7)
""".format(**common_args),
)

add_docstr(
    torch.vdot,
    r"""
vdot(input, other, *, out=None) -> Tensor

Computes the dot product of two 1D vectors.

In symbols, this function computes

.. math::

    \sum_{i=1}^n \overline{x_i}y_i,

where :math:`\overline{x_i}` denotes the conjugate for complex
vectors, and it is the identity for real vectors.

.. note::

    Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
    of two 1D tensors with the same number of elements.

.. seealso::

    :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.

Args:
    input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
    other (Tensor): second tensor in the dot product, must be 1D.

Keyword args:
"""
    + rf"""
.. note:: {common_args["out"]}
"""
    + r"""
Example::

    >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
    tensor(7)
    >>> a = torch.tensor((1 + 2j, 3 - 1j))
    >>> b = torch.tensor((2 + 1j, 4 - 0j))
    >>> torch.vdot(a, b)
    tensor([16.+1.j])
    >>> torch.vdot(b, a)
    tensor([16.-1.j])
""",
)

add_docstr(
    torch.eq,
    r"""
eq(input, other, *, out=None) -> Tensor

Computes element-wise equality.

The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere

Example::

    >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[ True, False],
            [False, True]])
""".format(**common_args),
)

add_docstr(
    torch.equal,
    r"""
equal(input, other) -> bool

``True`` if two tensors have the same size and elements, ``False`` otherwise.

Example::

    >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
    True
""",
)

add_docstr(
    torch.erf,
    r"""
erf(input, *, out=None) -> Tensor

Alias for :func:`torch.special.erf`.
""",
)

add_docstr(
    torch.erfc,
    r"""
erfc(input, *, out=None) -> Tensor

Alias for :func:`torch.special.erfc`.
""",
)

add_docstr(
    torch.erfinv,
    r"""
erfinv(input, *, out=None) -> Tensor

Alias for :func:`torch.special.erfinv`.
""",
)

add_docstr(
    torch.exp,
    r"""
exp(input, *, out=None) -> Tensor

Returns a new tensor with the exponential of the elements
of the input tensor :attr:`input`.

.. math::
    y_{i} = e^{x_{i}}
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.exp(torch.tensor([0, math.log(2.)]))
    tensor([ 1., 2.])
""".format(**common_args),
)

add_docstr(
    torch.exp2,
    r"""
exp2(input, *, out=None) -> Tensor

Alias for :func:`torch.special.exp2`.
""",
)

add_docstr(
    torch.expm1,
    r"""
expm1(input, *, out=None) -> Tensor

Alias for :func:`torch.special.expm1`.
""",
)

add_docstr(
    torch.eye,
    r"""
eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.

Args:
    n (int): the number of rows
    m (int, optional): the number of columns with default being :attr:`n`

Keyword arguments:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Returns:
    Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere

Example::

    >>> torch.eye(3)
    tensor([[ 1., 0., 0.],
            [ 0., 1., 0.],
            [ 0., 0., 1.]])
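
Passing :attr:`m` yields a rectangular matrix with ones only on the main
diagonal (output shown in the same style as above)::

    >>> torch.eye(2, 3)
    tensor([[ 1., 0., 0.],
            [ 0., 1., 0.]])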
""".format(**factory_common_args),
)

add_docstr(
    torch.floor,
    r"""
floor(input, *, out=None) -> Tensor

Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.

For integer inputs, follows the array-api convention of returning a
copy of the input tensor.

.. math::
    \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.8166, 1.5308, -0.2530, -0.2091])
    >>> torch.floor(a)
    tensor([-1., 1., -1., -1.])
""".format(**common_args),
)

add_docstr(
    torch.floor_divide,
    r"""
floor_divide(input, other, *, out=None) -> Tensor

.. note::

    Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
    truncation division. To restore the previous behavior use
    :func:`torch.div` with ``rounding_mode='trunc'``.

Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
the result.

.. math::
    \text{out}_i = \text{floor} \left( \frac{\text{input}_i}{\text{other}_i} \right)
"""
    + r"""
Supports broadcasting to a common shape, type promotion, and integer and float inputs.

Args:
    input (Tensor or Number): the dividend
    other (Tensor or Number): the divisor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([4.0, 3.0])
    >>> b = torch.tensor([2.0, 2.0])
    >>> torch.floor_divide(a, b)
    tensor([2.0, 1.0])
    >>> torch.floor_divide(a, 1.4)
    tensor([2.0, 2.0])
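
Integer inputs keep an integer dtype, and the result is still floored
toward negative infinity::

    >>> torch.floor_divide(torch.tensor([-4, 7]), torch.tensor([3, 3]))
    tensor([-2, 2])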
""".format(**common_args),
)

add_docstr(
    torch.fmod,
    r"""
fmod(input, other, *, out=None) -> Tensor

Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
The result has the same sign as the dividend :attr:`input` and its absolute value
is less than that of :attr:`other`.

This function may be defined in terms of :func:`torch.div` as

.. code:: python

    torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

.. note::

    When the divisor is zero, returns ``NaN`` for floating point dtypes
    on both CPU and GPU; raises ``RuntimeError`` for integer division by
    zero on CPU; integer division by zero on GPU may return any value.

.. note::

    Complex inputs are not supported. In some cases, it is not mathematically
    possible to satisfy the definition of a modulo operation with complex numbers.

.. seealso::

    :func:`torch.remainder`, which implements Python's modulus operator
    and is defined using division that rounds down the result.

Args:
    input (Tensor): the dividend
    other (Tensor or Scalar): the divisor

Keyword args:
    {out}

Example::

    >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
    tensor([-1., -0., -1., 1., 0., 1.])
    >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
    tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
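
For contrast, :func:`torch.remainder` follows Python's floored modulus on
the same inputs::

    >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
    tensor([1., 0., 1., 1., 0., 1.])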
""".format(**common_args),
)

add_docstr(
    torch.frac,
    r"""
frac(input, *, out=None) -> Tensor

Computes the fractional portion of each element in :attr:`input`.

.. math::
    \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})

Example::

    >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
    tensor([ 0.0000, 0.5000, -0.2000])
""",
)

add_docstr(
    torch.frexp,
    r"""
frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)

Decomposes :attr:`input` into mantissa and exponent tensors
such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.

The range of mantissa is the open interval (-1, 1).

Supports float inputs.

Args:
    input (Tensor): the input tensor

Keyword args:
    out (tuple, optional): the output tensors

Example::

    >>> x = torch.arange(9.)
    >>> mantissa, exponent = torch.frexp(x)
    >>> mantissa
    tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
    >>> exponent
    tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
    >>> torch.ldexp(mantissa, exponent)
    tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
""",
)

add_docstr(
    torch.from_numpy,
    r"""
from_numpy(ndarray) -> Tensor

Creates a :class:`Tensor` from a :class:`numpy.ndarray`.

The returned tensor and :attr:`ndarray` share the same memory. Modifications to
the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
tensor is not resizable.

It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
and ``numpy.bool``.

.. warning::
    Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.

Example::

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.from_numpy(a)
    >>> t
    tensor([ 1, 2, 3])
    >>> t[0] = -1
    >>> a
    array([-1, 2, 3])
""",
)

add_docstr(
    torch.frombuffer,
    r"""
frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor

Creates a 1-dimensional :class:`Tensor` from an object that implements
the Python buffer protocol.

Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
elements.

Note that either of the following must be true:

1. :attr:`count` is a positive non-zero number, and the total number of bytes
   in the buffer is at least :attr:`offset` plus :attr:`count` times the size
   (in bytes) of :attr:`dtype`.

2. :attr:`count` is negative, and the length (number of bytes) of the buffer
   minus the :attr:`offset` is a multiple of the size (in bytes) of
   :attr:`dtype`.

The returned tensor and buffer share the same memory. Modifications to
the tensor will be reflected in the buffer and vice versa. The returned
tensor is not resizable.

.. note::

    This function increments the reference count for the object that
    owns the shared memory. Therefore, such memory will not be deallocated
    before the returned tensor goes out of scope.

.. warning::

    This function's behavior is undefined when passed an object implementing
    the buffer protocol whose data is not on the CPU. Doing so is likely to
    cause a segmentation fault.

.. warning::

    This function does not try to infer the :attr:`dtype` (hence, it is not
    optional). Passing a different :attr:`dtype` than its source may result
    in unexpected behavior.

Args:
    buffer (object): a Python object that exposes the buffer interface.

Keyword args:
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
    count (int, optional): the number of desired elements to be read.
        If negative, all the elements (until the end of the buffer) will be
        read. Default: -1.
    offset (int, optional): the number of bytes to skip at the start of
        the buffer. Default: 0.
    {requires_grad}

Example::

    >>> import array
    >>> a = array.array('i', [1, 2, 3])
    >>> t = torch.frombuffer(a, dtype=torch.int32)
    >>> t
    tensor([ 1, 2, 3])
    >>> t[0] = -1
    >>> a
    array([-1, 2, 3])

    >>> # Interprets the signed char bytes as 32-bit integers.
    >>> # Each 4 signed char elements will be interpreted as
    >>> # 1 signed 32-bit integer.
    >>> import array
    >>> a = array.array('b', [-1, 0, 0, 0])
    >>> torch.frombuffer(a, dtype=torch.int32)
    tensor([255], dtype=torch.int32)
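
``count`` and ``offset`` select a sub-range of the buffer; the offset is
given in bytes (4 bytes per ``int32`` element here)::

    >>> a = array.array('i', [1, 2, 3, 4])
    >>> torch.frombuffer(a, dtype=torch.int32, count=2, offset=4)
    tensor([2, 3], dtype=torch.int32)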
""".format(**factory_common_args),
)

add_docstr(
    torch.flatten,
    r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor

Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.

Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.

.. note::
    Flattening a zero-dimensional tensor will return a one-dimensional view.

Args:
    {input}
    start_dim (int): the first dim to flatten
    end_dim (int): the last dim to flatten

Example::

    >>> t = torch.tensor([[[1, 2],
    ...                    [3, 4]],
    ...                   [[5, 6],
    ...                    [7, 8]]])
    >>> torch.flatten(t)
    tensor([1, 2, 3, 4, 5, 6, 7, 8])
    >>> torch.flatten(t, start_dim=1)
    tensor([[1, 2, 3, 4],
            [5, 6, 7, 8]])
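
Restricting the flattened range with ``end_dim`` keeps the trailing
dimensions intact::

    >>> torch.flatten(t, start_dim=0, end_dim=1)
    tensor([[1, 2],
            [3, 4],
            [5, 6],
            [7, 8]])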
""".format(**common_args),
)

add_docstr(
    torch.unflatten,
    r"""
unflatten(input, dim, sizes) -> Tensor

Expands a dimension of the input tensor over multiple dimensions.

.. seealso::

    :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.

Args:
    {input}
    dim (int): Dimension to be unflattened, specified as an index into
        ``input.shape``.
    sizes (Tuple[int]): New shape of the unflattened dimension.
        One of its elements can be `-1` in which case the corresponding output
        dimension is inferred. Otherwise, the product of ``sizes`` *must*
        equal ``input.shape[dim]``.

Returns:
    A View of input with the specified dimension unflattened.

Examples::

    >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
    torch.Size([3, 2, 2, 1])
    >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
    torch.Size([3, 2, 2, 1])
    >>> torch.unflatten(torch.randn(5, 12, 3), -1, (2, 2, 3, 1, 1)).shape
    torch.Size([5, 2, 2, 3, 1, 1, 3])
""".format(**common_args),
)

add_docstr(
    torch.gather,
    r"""
gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor

Gathers values along an axis specified by `dim`.

For a 3-D tensor the output is specified by::

    out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
    out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
    out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2

:attr:`input` and :attr:`index` must have the same number of dimensions.
It is also required that ``index.size(d) <= input.size(d)`` for all
dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
Note that ``input`` and ``index`` do not broadcast against each other.

Args:
    input (Tensor): the source tensor
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to gather

Keyword arguments:
    sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
    out (Tensor, optional): the destination tensor

Example::

    >>> t = torch.tensor([[1, 2], [3, 4]])
    >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
    tensor([[ 1, 1],
            [ 4, 3]])
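
Gathering along ``dim=0`` instead follows the first formula above, picking
a row index for each output element::

    >>> torch.gather(t, 0, torch.tensor([[0, 1], [1, 0]]))
    tensor([[1, 4],
            [3, 2]])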
""",
)

add_docstr(
    torch.gcd,
    r"""
gcd(input, other, *, out=None) -> Tensor

Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.

Both :attr:`input` and :attr:`other` must have integer types.

.. note::
    This defines :math:`gcd(0, 0) = 0`.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword arguments:
    {out}

Example::

    >>> a = torch.tensor([5, 10, 15])
    >>> b = torch.tensor([3, 4, 5])
    >>> torch.gcd(a, b)
    tensor([1, 2, 5])
    >>> c = torch.tensor([3])
    >>> torch.gcd(a, c)
    tensor([1, 1, 3])
""".format(**common_args),
)

add_docstr(
    torch.ge,
    r"""
ge(input, other, *, out=None) -> Tensor

Computes :math:`\text{input} \geq \text{other}` element-wise.
"""
    + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere

Example::

    >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[True, True], [False, True]])
""".format(**common_args),
)

add_docstr(
    torch.greater_equal,
    r"""
greater_equal(input, other, *, out=None) -> Tensor

Alias for :func:`torch.ge`.
""",
)

add_docstr(
    torch.gradient,
    r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors

Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.

The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3) == input[1, 2, 3]`.

When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.

The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point and :math:`x+h_r` be a point neighboring it, :math:`f(x+h_r)` can
be expanded as:

.. math::
    \begin{aligned}
        f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(x_r)}{6} \\
    \end{aligned}

where :math:`x_r` is a number in the interval :math:`[x, x+h_r]`, and using the fact that :math:`f \in C^3`
we derive:

.. math::
    f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
          + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }

.. note::
    We estimate the gradient of functions in complex domain
    :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.

The value of each partial derivative at the boundary points is computed differently. See edge_order below.

Args:
    input (``Tensor``): the tensor that represents the values of the function

Keyword args:
    spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
        how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
        the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
        indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
        indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
        Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
        the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
        the coordinates are (t0[1], t1[2], t2[3])

    dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
        the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
        the :attr:`spacing` argument must correspond with the specified dims.

    edge_order (``int``, optional): 1 or 2, for `first-order
        <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
        `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
        estimation of the boundary ("edge") values, respectively.

Examples::

    >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4]
    >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
    >>> values = torch.tensor([4., 1., 1., 16.], )
    >>> torch.gradient(values, spacing = coordinates)
    (tensor([-3., -2., 2., 5.]),)

    >>> # Estimates the gradient of the R^2 -> R function whose samples are
    >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
    >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
    >>> # partial derivative for both dimensions.
    >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
    >>> torch.gradient(t)
    (tensor([[ 9., 18., 36., 72.],
             [ 9., 18., 36., 72.]]),
     tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
             [10.0000, 15.0000, 30.0000, 40.0000]]))

    >>> # A scalar value for spacing modifies the relationship between tensor indices
    >>> # and input coordinates by multiplying the indices to find the
    >>> # coordinates. For example, below the indices of the innermost
    >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
    >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
    >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
    (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
             [ 4.5000, 9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
             [ 5.0000, 7.5000, 15.0000, 20.0000]]))
    >>> # doubling the spacing between samples halves the estimated partial gradients.

    >>> # Estimates only the partial derivative for dimension 1
    >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
    (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
             [10.0000, 15.0000, 30.0000, 40.0000]]),)

    >>> # When spacing is a list of scalars, the relationship between the tensor
    >>> # indices and input coordinates changes based on dimension.
    >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
    >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
    >>> # 0, 1 translate to coordinates of [0, 2].
    >>> torch.gradient(t, spacing = [3., 2.])
    (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
             [ 4.5000, 9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
             [ 3.3333, 5.0000, 10.0000, 13.3333]]))

    >>> # The following example is a replication of the previous one with explicit
    >>> # coordinates.
    >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
    >>> torch.gradient(t, spacing = coords)
    (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
             [ 4.5000, 9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
             [ 3.3333, 5.0000, 10.0000, 13.3333]]))
""",
)

add_docstr(
    torch.geqrf,
    r"""
geqrf(input, *, out=None) -> (Tensor, Tensor)

This is a low-level function for calling LAPACK's geqrf directly. This function
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_.

Computes a QR decomposition of :attr:`input`.
Both `Q` and `R` matrices are stored in the same output tensor `a`.
The elements of `R` are stored on and above the diagonal.
Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
are stored below the diagonal.

The results of this function can be used together with :func:`torch.linalg.householder_product`
to obtain the `Q` matrix or
with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
for an efficient matrix-matrix multiplication.

See `LAPACK documentation for geqrf`_ for further details.

.. note::
    See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
    with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.

Args:
    input (Tensor): the input matrix

Keyword args:
    out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.

.. _LAPACK documentation for geqrf:
    http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
""",
)

add_docstr(
    torch.inner,
    r"""
inner(input, other, *, out=None) -> Tensor

Computes the dot product for 1D tensors. For higher dimensions, sums the product
of elements from :attr:`input` and :attr:`other` along their last dimension.

.. note::

    If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
    to `torch.mul(input, other)`.

    If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
    dimension must match and the result is equivalent to `torch.tensordot(input,
    other, dims=([-1], [-1]))`

Args:
    input (Tensor): First input tensor
    other (Tensor): Second input tensor

Keyword args:
    out (Tensor, optional): Optional output tensor to write result into. The output
        shape is `input.shape[:-1] + other.shape[:-1]`.

Example::

    # Dot product
    >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
    tensor(7)

    # Multidimensional input tensors
    >>> a = torch.randn(2, 3)
    >>> a
    tensor([[0.8173, 1.0874, 1.1784],
            [0.3279, 0.1234, 2.7894]])
    >>> b = torch.randn(2, 4, 3)
    >>> b
    tensor([[[-0.4682, -0.7159, 0.1506],
             [ 0.4034, -0.3657, 1.0387],
             [ 0.9892, -0.6684, 0.1774],
             [ 0.9482, 1.3261, 0.3917]],

            [[ 0.4537, 0.7493, 1.1724],
             [ 0.2291, 0.5749, -0.2267],
             [-0.7920, 0.3607, -0.3701],
             [ 1.3666, -0.5850, -1.7242]]])
    >>> torch.inner(a, b)
    tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
             [ 2.5671, 0.5452, -0.6912, -1.5509]],

            [[ 0.1782, 2.9843, 0.7366, 1.5672],
             [ 3.5115, -0.4864, -1.2476, -4.4337]]])

    # Scalar input
    >>> torch.inner(a, torch.tensor(2))
    tensor([[1.6347, 2.1748, 2.3567],
            [0.6558, 0.2469, 5.5787]])
""",
)

add_docstr(
    torch.outer,
    r"""
outer(input, vec2, *, out=None) -> Tensor

Outer product of :attr:`input` and :attr:`vec2`.
If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

Args:
    input (Tensor): 1-D input vector
    vec2 (Tensor): 1-D input vector

Keyword args:
    out (Tensor, optional): optional output matrix

Example::

    >>> v1 = torch.arange(1., 5.)
    >>> v2 = torch.arange(1., 4.)
    >>> torch.outer(v1, v2)
    tensor([[ 1., 2., 3.],
            [ 2., 4., 6.],
            [ 3., 6., 9.],
            [ 4., 8., 12.]])
""",
)

add_docstr(
    torch.ger,
    r"""
ger(input, vec2, *, out=None) -> Tensor

Alias of :func:`torch.outer`.

.. warning::
    This function is deprecated and will be removed in a future PyTorch release.
    Use :func:`torch.outer` instead.
""",
)

add_docstr(
    torch.get_default_dtype,
    r"""
get_default_dtype() -> torch.dtype

Get the current default floating point :class:`torch.dtype`.

Example::

    >>> torch.get_default_dtype()  # initial default for floating point is torch.float32
    torch.float32
    >>> torch.set_default_dtype(torch.float64)
    >>> torch.get_default_dtype()  # default is now changed to torch.float64
    torch.float64
    >>> torch.set_default_tensor_type(torch.FloatTensor)  # setting tensor type also affects this
    >>> torch.get_default_dtype()  # changed to torch.float32, the dtype for torch.FloatTensor
    torch.float32
""",
)

add_docstr(
    torch.get_num_threads,
    r"""
get_num_threads() -> int

Returns the number of threads used for parallelizing CPU operations.
""",
)

add_docstr(
    torch.get_num_interop_threads,
    r"""
get_num_interop_threads() -> int

Returns the number of threads used for inter-op parallelism on CPU
(e.g. in JIT interpreter).
""",
)

add_docstr(
    torch.gt,
    r"""
gt(input, other, *, out=None) -> Tensor

Computes :math:`\text{input} > \text{other}` element-wise.
"""
    + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere

Example::

    >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[False, True], [False, False]])
""".format(**common_args),
)

add_docstr(
    torch.greater,
    r"""
greater(input, other, *, out=None) -> Tensor

Alias for :func:`torch.gt`.
""",
)

add_docstr(
    torch.histc,
    r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor

Computes the histogram of a tensor.

The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.

Elements lower than min and higher than max and ``NaN`` elements are ignored.

Args:
    {input}
    bins (int): number of histogram bins
    min (Scalar): lower end of the range (inclusive)
    max (Scalar): upper end of the range (inclusive)

Keyword args:
    {out}

Returns:
    Tensor: Histogram represented as a tensor

Example::

    >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
    tensor([ 0., 2., 1., 0.])
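
When :attr:`min` and :attr:`max` are left at zero, the data range is used
instead::

    >>> torch.histc(torch.tensor([1., 2., 1.]), bins=2)
    tensor([2., 1.])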
""".format(**common_args),
)

add_docstr(
    torch.histogram,
    r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)

Computes a histogram of the values in a tensor.

:attr:`bins` can be an integer or a 1D tensor.

If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.

If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.

Args:
    {input}
    bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
        defines the sequence of bin edges including the rightmost edge.

Keyword args:
    range (tuple of float): Defines the range of the bins.
    weight (Tensor): If provided, weight should have the same shape as input. Each value in
        input contributes its associated weight towards its bin's result.
    density (bool): If False, the result will contain the count (or total weight) in each bin.
        If True, the result is the value of the probability density function over the bins,
        normalized such that the integral over the range of the bins is 1.
    out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).

Returns:
    hist (Tensor): 1D Tensor containing the values of the histogram.
    bin_edges (Tensor): 1D Tensor containing the edges of the histogram bins.

Example::

    >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
    (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
    >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
    (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
""".format(**common_args),
)

add_docstr(
    torch.histogramdd,
    r"""
histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])

Computes a multi-dimensional histogram of the values in a tensor.

Interprets the elements of an input tensor whose innermost dimension has size N
as a collection of N-dimensional points. Maps each of the points into a set of
N-dimensional bins and returns the number of points (or total weight) in each bin.

:attr:`input` must be a tensor with at least 2 dimensions.
If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
If input has three or more dimensions, all but the last dimension are flattened.

Each dimension is independently associated with its own strictly increasing sequence
of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
tensors. Alternatively, bin edges may be constructed automatically by passing a
sequence of integers specifying the number of equal-width bins in each dimension.

For each N-dimensional point in input:

- Each of its coordinates is binned independently among the bin edges
  corresponding to its dimension
- Binning results are combined to identify the N-dimensional bin (if any)
  into which the point falls
- If the point falls into a bin, the bin's count (or total weight) is incremented
- Points which do not fall into any bin do not contribute to the output

:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.

If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is exclusive of its left edge. Only
the rightmost bin is inclusive of its right edge.

If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
in each dimension. By default, the leftmost and rightmost bin edges in each dimension
are determined by the minimum and maximum elements of the input tensor in the
corresponding dimension. The :attr:`range` argument can be provided to manually
specify the leftmost and rightmost bin edges in each dimension.

If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.

.. note::
    See also :func:`torch.histogram`, which specifically computes 1D histograms.
    While :func:`torch.histogramdd` infers the dimensionality of its bins and
    binned values from the shape of :attr:`input`, :func:`torch.histogram`
    accepts and flattens :attr:`input` of any shape.

Args:
    {input}
    bins: Tensor[], int[], or int.
        If Tensor[], defines the sequences of bin edges.
        If int[], defines the number of equal-width bins in each dimension.
        If int, defines the number of equal-width bins for all dimensions.

Keyword args:
    range (sequence of float): Defines the leftmost and rightmost bin edges
        in each dimension.
    weight (Tensor): By default, each value in the input has weight 1. If a weight
        tensor is passed, each N-dimensional coordinate in input
        contributes its associated weight towards its bin's result.
        The weight tensor should have the same shape as the :attr:`input`
        tensor excluding its innermost dimension N.
    density (bool): If False (default), the result will contain the count (or total weight)
        in each bin. If True, each count (weight) is divided by the total count
        (total weight), then divided by the volume of its associated bin.

Returns:
    hist (Tensor): N-dimensional Tensor containing the values of the histogram.
    bin_edges (Tensor[]): sequence of N 1D Tensors containing the bin edges.

Example::

    >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
    ...                   weight=torch.tensor([1., 2., 4., 8.]))
    torch.return_types.histogramdd(
        hist=tensor([[0., 1., 0.],
                     [2., 0., 0.],
                     [4., 0., 8.]]),
        bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
                   tensor([0.0000, 0.6667, 1.3333, 2.0000])))

    >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
    ...                   range=[0., 1., 0., 1.], density=True)
    torch.return_types.histogramdd(
        hist=tensor([[2., 0.],
                     [0., 2.]]),
        bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
                   tensor([0.0000, 0.5000, 1.0000])))
""".format(**common_args),
)

# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
torch.histogramdd.__module__ = "torch"

add_docstr(
    torch.hypot,
    r"""
hypot(input, other, *, out=None) -> Tensor

Given the legs of a right triangle, return its hypotenuse.

.. math::
    \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}

The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
"""
    + r"""
Args:
    input (Tensor): the first input tensor
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
    tensor([5.0000, 5.6569, 6.4031])
""".format(**common_args),
)
  4135. add_docstr(
  4136. torch.i0,
  4137. r"""
  4138. i0(input, *, out=None) -> Tensor
  4139. Alias for :func:`torch.special.i0`.
  4140. """,
  4141. )
  4142. add_docstr(
  4143. torch.igamma,
  4144. r"""
  4145. igamma(input, other, *, out=None) -> Tensor
  4146. Alias for :func:`torch.special.gammainc`.
  4147. """,
  4148. )
  4149. add_docstr(
  4150. torch.igammac,
  4151. r"""
  4152. igammac(input, other, *, out=None) -> Tensor
  4153. Alias for :func:`torch.special.gammaincc`.
  4154. """,
  4155. )
  4156. add_docstr(
  4157. torch.index_select,
  4158. r"""
  4159. index_select(input, dim, index, *, out=None) -> Tensor
  4160. Returns a new tensor which indexes the :attr:`input` tensor along dimension
  4161. :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
  4162. The returned tensor has the same number of dimensions as the original tensor
  4163. (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
  4164. of :attr:`index`; other dimensions have the same size as in the original tensor.
  4165. .. note:: The returned tensor does **not** use the same storage as the original
  4166. tensor. If :attr:`out` has a different shape than expected, we
  4167. silently change it to the correct shape, reallocating the underlying
  4168. storage if necessary.
  4169. Args:
  4170. {input}
  4171. dim (int): the dimension in which we index
  4172. index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
  4173. Keyword args:
  4174. {out}
  4175. Example::
  4176. >>> x = torch.randn(3, 4)
  4177. >>> x
  4178. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  4179. [-0.4664, 0.2647, -0.1228, -1.1068],
  4180. [-1.1734, -0.6571, 0.7230, -0.6004]])
  4181. >>> indices = torch.tensor([0, 2])
  4182. >>> torch.index_select(x, 0, indices)
  4183. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  4184. [-1.1734, -0.6571, 0.7230, -0.6004]])
  4185. >>> torch.index_select(x, 1, indices)
  4186. tensor([[ 0.1427, -0.5414],
  4187. [-0.4664, -0.1228],
  4188. [-1.1734, 0.7230]])
  4189. """.format(
  4190. **common_args
  4191. ),
  4192. )
  4193. add_docstr(
  4194. torch.inverse,
  4195. r"""
  4196. inverse(input, *, out=None) -> Tensor
  4197. Alias for :func:`torch.linalg.inv`
  4198. """,
  4199. )
  4200. add_docstr(
  4201. torch.isin,
  4202. r"""
  4203. isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
  4204. Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
  4205. a boolean tensor of the same shape as :attr:`elements` that is True for elements
  4206. in :attr:`test_elements` and False otherwise.
  4207. .. note::
  4208. One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
  4209. Args:
  4210. elements (Tensor or Scalar): Input elements
  4211. test_elements (Tensor or Scalar): Values against which to test for each input element
  4212. assume_unique (bool, optional): If True, assumes both :attr:`elements` and
  4213. :attr:`test_elements` contain unique elements, which can speed up the
  4214. calculation. Default: False
  4215. invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
  4216. values for elements *not* in :attr:`test_elements`. Default: False
  4217. Returns:
  4218. A boolean tensor of the same shape as :attr:`elements` that is True for elements in
  4219. :attr:`test_elements` and False otherwise
  4220. Example:
  4221. >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
  4222. tensor([[False, True],
  4223. [ True, False]])
  4224. """,
  4225. )
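
# Editor's note: an illustrative doctest-style sketch, not part of the upstream
# docstring. The ``invert`` flag negates the membership test from the example above:
#     >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]), invert=True)
#     tensor([[ True, False],
#             [False,  True]])
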
add_docstr(
    torch.isinf,
    r"""
isinf(input) -> Tensor

Tests if each element of :attr:`input` is infinite
(positive or negative infinity) or not.

.. note::
    Complex values are infinite when their real or imaginary part is
    infinite.

Args:
    {input}

Returns:
    A boolean tensor that is True where :attr:`input` is infinite and False elsewhere

Example::

    >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
    tensor([False, True, False, True, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.isposinf,
    r"""
isposinf(input, *, out=None) -> Tensor

Tests if each element of :attr:`input` is positive infinity or not.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
    >>> torch.isposinf(a)
    tensor([False, True, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.isneginf,
    r"""
isneginf(input, *, out=None) -> Tensor

Tests if each element of :attr:`input` is negative infinity or not.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
    >>> torch.isneginf(a)
    tensor([ True, False, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.isclose,
    r"""
isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor

Returns a new tensor with boolean elements representing if each element of
:attr:`input` is "close" to the corresponding element of :attr:`other`.
Closeness is defined as:

.. math::
    \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
"""
    + r"""
where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
and/or :attr:`other` are nonfinite they are close if and only if
they are equal, with NaNs being considered equal to each other when
:attr:`equal_nan` is True.

Args:
    input (Tensor): first tensor to compare
    other (Tensor): second tensor to compare
    atol (float, optional): absolute tolerance. Default: 1e-08
    rtol (float, optional): relative tolerance. Default: 1e-05
    equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``

Examples::

    >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
    tensor([ True, False, False])
    >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
    tensor([True, True])
""",
)

add_docstr(
    torch.isfinite,
    r"""
isfinite(input) -> Tensor

Returns a new tensor with boolean elements representing if each element is `finite` or not.

Real values are finite when they are not NaN, negative infinity, or infinity.
Complex values are finite when both their real and imaginary parts are finite.

Args:
    {input}

Returns:
    A boolean tensor that is True where :attr:`input` is finite and False elsewhere

Example::

    >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
    tensor([True, False, True, False, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.isnan,
    r"""
isnan(input) -> Tensor

Returns a new tensor with boolean elements representing if each element of :attr:`input`
is NaN or not. Complex values are considered NaN when either their real or
imaginary part is NaN.

Arguments:
    {input}

Returns:
    A boolean tensor that is True where :attr:`input` is NaN and False elsewhere

Example::

    >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
    tensor([False, True, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.isreal,
    r"""
isreal(input) -> Tensor

Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.

Arguments:
    {input}

Returns:
    A boolean tensor that is True where :attr:`input` is real and False elsewhere

Example::

    >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
    tensor([True, False, True])
""".format(
    **common_args
),
)

add_docstr(
    torch.is_floating_point,
    r"""
is_floating_point(input) -> (bool)

Returns True if the data type of :attr:`input` is a floating point data type, i.e.,
one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.

Args:
    {input}
""".format(
    **common_args
),
)
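
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# The default float tensor (float32) is floating point; an integer tensor is not:
#     >>> torch.is_floating_point(torch.tensor([1.0]))
#     True
#     >>> torch.is_floating_point(torch.tensor([1]))
#     False
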
add_docstr(
    torch.is_complex,
    r"""
is_complex(input) -> (bool)

Returns True if the data type of :attr:`input` is a complex data type, i.e.,
one of ``torch.complex64`` and ``torch.complex128``.

Args:
    {input}
""".format(
    **common_args
),
)
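
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# A tensor built from a Python complex literal defaults to a complex dtype:
#     >>> torch.is_complex(torch.tensor([1 + 1j]))
#     True
#     >>> torch.is_complex(torch.tensor([1.0]))
#     False
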
add_docstr(
    torch.is_grad_enabled,
    r"""
is_grad_enabled() -> (bool)

Returns True if grad mode is currently enabled.
""".format(
    **common_args
),
)
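
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# Grad mode is enabled by default and can be toggled locally, e.g. with no_grad:
#     >>> torch.is_grad_enabled()
#     True
#     >>> with torch.no_grad():
#     ...     torch.is_grad_enabled()
#     False
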
add_docstr(
    torch.is_inference_mode_enabled,
    r"""
is_inference_mode_enabled() -> (bool)

Returns True if inference mode is currently enabled.
""".format(
    **common_args
),
)

add_docstr(
    torch.is_inference,
    r"""
is_inference(input) -> (bool)

Returns True if :attr:`input` is an inference tensor.

A non-view tensor is an inference tensor if and only if it was
allocated during inference mode. A view tensor is an inference
tensor if and only if the tensor it is a view of is an inference tensor.

For details on inference mode please see
`Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.

Args:
    {input}
""".format(
    **common_args
),
)
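
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# Tensors allocated inside ``torch.inference_mode()`` are inference tensors:
#     >>> with torch.inference_mode():
#     ...     t = torch.ones(2)
#     >>> torch.is_inference(t)
#     True
#     >>> torch.is_inference(torch.ones(2))
#     False
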
add_docstr(
    torch.is_conj,
    r"""
is_conj(input) -> (bool)

Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.

Args:
    {input}
""".format(
    **common_args
),
)
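
# Editor's note: an illustrative sketch, not part of the upstream docstring
# (behavior assumes the lazy-conjugation views that set the conjugate bit).
# ``Tensor.conj`` on a complex tensor returns a view with the conjugate bit set:
#     >>> t = torch.tensor([1 + 2j])
#     >>> torch.is_conj(t)
#     False
#     >>> torch.is_conj(t.conj())
#     True
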
add_docstr(
    torch.is_nonzero,
    r"""
is_nonzero(input) -> (bool)

Returns True if the :attr:`input` is a single element tensor which is not equal to zero
after type conversions, i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
``torch.tensor([False])``.
Throws a ``RuntimeError`` if ``torch.numel(input) != 1`` (even in case
of sparse tensors).

Args:
    {input}

Examples::

    >>> torch.is_nonzero(torch.tensor([0.]))
    False
    >>> torch.is_nonzero(torch.tensor([1.5]))
    True
    >>> torch.is_nonzero(torch.tensor([False]))
    False
    >>> torch.is_nonzero(torch.tensor([3]))
    True
    >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
    Traceback (most recent call last):
    ...
    RuntimeError: bool value of Tensor with more than one value is ambiguous
    >>> torch.is_nonzero(torch.tensor([]))
    Traceback (most recent call last):
    ...
    RuntimeError: bool value of Tensor with no values is ambiguous
""".format(
    **common_args
),
)

add_docstr(
    torch.kron,
    r"""
kron(input, other, *, out=None) -> Tensor

Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.

If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:

.. math::
    (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
        \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},

where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.

Supports real-valued and complex-valued inputs.

.. note::
    This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
    as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
    :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:

    .. math::
        \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
        a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
        \vdots & \ddots & \vdots \\
        a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}

    where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.

Arguments:
    input (Tensor)
    other (Tensor)

Keyword args:
    out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``

Examples::

    >>> mat1 = torch.eye(2)
    >>> mat2 = torch.ones(2, 2)
    >>> torch.kron(mat1, mat2)
    tensor([[1., 1., 0., 0.],
            [1., 1., 0., 0.],
            [0., 0., 1., 1.],
            [0., 0., 1., 1.]])

    >>> mat1 = torch.eye(2)
    >>> mat2 = torch.arange(1, 5).reshape(2, 2)
    >>> torch.kron(mat1, mat2)
    tensor([[1., 2., 0., 0.],
            [3., 4., 0., 0.],
            [0., 0., 1., 2.],
            [0., 0., 3., 4.]])
""",
)

add_docstr(
    torch.kthvalue,
    r"""
kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each element found.

If :attr:`dim` is not given, the last dimension of the `input` is chosen.

If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.

.. note::
    When :attr:`input` is a CUDA tensor and there are multiple valid
    :attr:`k` th values, this function may nondeterministically return
    :attr:`indices` for any of them.

Args:
    {input}
    k (int): k for the k-th smallest element
    dim (int, optional): the dimension to find the kth value along
    {keepdim}

Keyword args:
    out (tuple, optional): the output tuple of (Tensor, LongTensor)
        can be optionally given to be used as output buffers

Example::

    >>> x = torch.arange(1., 6.)
    >>> x
    tensor([ 1., 2., 3., 4., 5.])
    >>> torch.kthvalue(x, 4)
    torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))

    >>> x = torch.arange(1., 7.).resize_(2, 3)
    >>> x
    tensor([[ 1., 2., 3.],
            [ 4., 5., 6.]])
    >>> torch.kthvalue(x, 2, 0, True)
    torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
""".format(
    **single_dim_common
),
)

add_docstr(
    torch.lcm,
    r"""
lcm(input, other, *, out=None) -> Tensor

Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.

Both :attr:`input` and :attr:`other` must have integer types.

.. note::
    This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword arguments:
    {out}

Example::

    >>> a = torch.tensor([5, 10, 15])
    >>> b = torch.tensor([3, 4, 5])
    >>> torch.lcm(a, b)
    tensor([15, 20, 15])
    >>> c = torch.tensor([3])
    >>> torch.lcm(a, c)
    tensor([15, 30, 15])
""".format(
    **common_args
),
)

add_docstr(
    torch.ldexp,
    r"""
ldexp(input, other, *, out=None) -> Tensor

Multiplies :attr:`input` by 2 ** :attr:`other`.

.. math::
    \text{out}_i = \text{input}_i * 2^{\text{other}_i}
"""
    + r"""
Typically this function is used to construct floating point numbers by multiplying
mantissas in :attr:`input` with integral powers of two created from the exponents
in :attr:`other`.

Args:
    {input}
    other (Tensor): a tensor of exponents, typically integers.

Keyword args:
    {out}

Example::

    >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
    tensor([2.])
    >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
    tensor([ 2., 4., 8., 16.])
""".format(
    **common_args
),
)

add_docstr(
    torch.le,
    r"""
le(input, other, *, out=None) -> Tensor

Computes :math:`\text{input} \leq \text{other}` element-wise.
"""
    + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or Scalar): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is less than or equal to
    :attr:`other` and False elsewhere

Example::

    >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[True, False], [True, True]])
""".format(
    **common_args
),
)

add_docstr(
    torch.less_equal,
    r"""
less_equal(input, other, *, out=None) -> Tensor

Alias for :func:`torch.le`.
""",
)

add_docstr(
    torch.lerp,
    r"""
lerp(input, end, weight, *, out=None)

Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.

.. math::
    \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
"""
    + r"""
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.

Args:
    input (Tensor): the tensor with the starting points
    end (Tensor): the tensor with the ending points
    weight (float or tensor): the weight for the interpolation formula

Keyword args:
    {out}

Example::

    >>> start = torch.arange(1., 5.)
    >>> end = torch.empty(4).fill_(10)
    >>> start
    tensor([ 1., 2., 3., 4.])
    >>> end
    tensor([ 10., 10., 10., 10.])
    >>> torch.lerp(start, end, 0.5)
    tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
    >>> torch.lerp(start, end, torch.full_like(start, 0.5))
    tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
""".format(
    **common_args
),
)

add_docstr(
    torch.lgamma,
    r"""
lgamma(input, *, out=None) -> Tensor

Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.

.. math::
    \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
"""
    + """
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.arange(0.5, 2, 0.5)
    >>> torch.lgamma(a)
    tensor([ 0.5724, 0.0000, -0.1208])
""".format(
    **common_args
),
)

add_docstr(
    torch.linspace,
    r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:

.. math::
    (\text{start},
    \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
    \ldots,
    \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
    \text{end})
"""
    + """

From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.

Args:
    start (float): the starting value for the set of points
    end (float): the ending value for the set of points
    steps (int): size of the constructed tensor

Keyword arguments:
    {out}
    dtype (torch.dtype, optional): the data type to perform the computation in.
        Default: if None, uses the global default dtype (see torch.get_default_dtype())
        when both :attr:`start` and :attr:`end` are real,
        and corresponding complex dtype when either is complex.
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.linspace(3, 10, steps=5)
    tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
    >>> torch.linspace(-10, 10, steps=5)
    tensor([-10., -5., 0., 5., 10.])
    >>> torch.linspace(start=-10, end=10, steps=5)
    tensor([-10., -5., 0., 5., 10.])
    >>> torch.linspace(start=-10, end=10, steps=1)
    tensor([-10.])
""".format(
    **factory_common_args
),
)

add_docstr(
    torch.log,
    r"""
log(input, *, out=None) -> Tensor

Returns a new tensor with the natural logarithm of the elements
of :attr:`input`.

.. math::
    y_{i} = \log_{e} (x_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.rand(5) * 5
    >>> a
    tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
    >>> torch.log(a)
    tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
""".format(
    **common_args
),
)

add_docstr(
    torch.log10,
    r"""
log10(input, *, out=None) -> Tensor

Returns a new tensor with the logarithm to the base 10 of the elements
of :attr:`input`.

.. math::
    y_{i} = \log_{10} (x_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.rand(5)
    >>> a
    tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
    >>> torch.log10(a)
    tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
""".format(
    **common_args
),
)

add_docstr(
    torch.log1p,
    r"""
log1p(input, *, out=None) -> Tensor

Returns a new tensor with the natural logarithm of (1 + :attr:`input`).

.. math::
    y_i = \log_{e} (x_i + 1)
"""
    + r"""
.. note:: This function is more accurate than :func:`torch.log` for small
    values of :attr:`input`

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(5)
    >>> a
    tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
    >>> torch.log1p(a)
    tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
""".format(
    **common_args
),
)

add_docstr(
    torch.log2,
    r"""
log2(input, *, out=None) -> Tensor

Returns a new tensor with the logarithm to the base 2 of the elements
of :attr:`input`.

.. math::
    y_{i} = \log_{2} (x_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.rand(5)
    >>> a
    tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
    >>> torch.log2(a)
    tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
""".format(
    **common_args
),
)

add_docstr(
    torch.logaddexp,
    r"""
logaddexp(input, other, *, out=None) -> Tensor

Logarithm of the sum of exponentiations of the inputs.

Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
in statistics where the calculated probabilities of events may be so small as to
exceed the range of normal floating point numbers. In such cases the logarithm
of the calculated probability is stored. This function allows adding
probabilities stored in such a fashion.

This op should not be confused with :func:`torch.logsumexp`, which performs a
reduction on a single tensor.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword arguments:
    {out}

Example::

    >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
    tensor([-0.3069, -0.6867, -0.8731])
    >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
    tensor([-1., -2., -3.])
    >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
    tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
""".format(
    **common_args
),
)

add_docstr(
    torch.logaddexp2,
    r"""
logaddexp2(input, other, *, out=None) -> Tensor

Logarithm of the sum of exponentiations of the inputs in base-2.

Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
:func:`torch.logaddexp` for more details.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword arguments:
    {out}
""".format(
    **common_args
),
)
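
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# Since logaddexp2 computes log2(2**x + 2**y), equal inputs give exactly x + 1:
#     >>> torch.logaddexp2(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.0]))
#     tensor([2., 3.])
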
add_docstr(
    torch.xlogy,
    r"""
xlogy(input, other, *, out=None) -> Tensor

Alias for :func:`torch.special.xlogy`.
""",
)
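
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# xlogy computes ``input * log(other)``, with a zero ``input`` yielding 0
# (for non-NaN ``other``); here 2 * log(4) is approximately 2.7726:
#     >>> torch.xlogy(torch.tensor([0., 2.]), torch.tensor([5., 4.]))
#     tensor([0.0000, 2.7726])
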
add_docstr(
    torch.logical_and,
    r"""
logical_and(input, other, *, out=None) -> Tensor

Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.

Args:
    {input}
    other (Tensor): the tensor to compute AND with

Keyword args:
    {out}

Example::

    >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
    tensor([ True, False, False])
    >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
    >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
    >>> torch.logical_and(a, b)
    tensor([False, False, True, False])
    >>> torch.logical_and(a.double(), b.double())
    tensor([False, False, True, False])
    >>> torch.logical_and(a.double(), b)
    tensor([False, False, True, False])
    >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
    tensor([False, False, True, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.logical_not,
    r"""
logical_not(input, *, out=None) -> Tensor

Computes the element-wise logical NOT of the given input tensor. If :attr:`out` is not
specified, the output tensor will have the bool dtype. If the input tensor is not a bool
tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.logical_not(torch.tensor([True, False]))
    tensor([False, True])
    >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
    tensor([ True, False, False])
    >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
    tensor([ True, False, False])
    >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
    tensor([1, 0, 0], dtype=torch.int16)
""".format(
    **common_args
),
)

add_docstr(
    torch.logical_or,
    r"""
logical_or(input, other, *, out=None) -> Tensor

Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.

Args:
    {input}
    other (Tensor): the tensor to compute OR with

Keyword args:
    {out}

Example::

    >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
    tensor([ True, False, True])
    >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
    >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
    >>> torch.logical_or(a, b)
    tensor([ True, True, True, False])
    >>> torch.logical_or(a.double(), b.double())
    tensor([ True, True, True, False])
    >>> torch.logical_or(a.double(), b)
    tensor([ True, True, True, False])
    >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
    tensor([ True, True, True, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.logical_xor,
    r"""
logical_xor(input, other, *, out=None) -> Tensor

Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.

Args:
    {input}
    other (Tensor): the tensor to compute XOR with

Keyword args:
    {out}

Example::

    >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
    tensor([False, False, True])
    >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
    >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
    >>> torch.logical_xor(a, b)
    tensor([ True, True, False, False])
    >>> torch.logical_xor(a.double(), b.double())
    tensor([ True, True, False, False])
    >>> torch.logical_xor(a.double(), b)
    tensor([ True, True, False, False])
    >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
    tensor([ True, True, False, False])
""".format(
    **common_args
),
)

add_docstr(
    torch.logspace,
    """
logspace(start, end, steps, base=10.0, *, \
out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{\text{base}}^{\text{start}}` to
:math:`{\text{base}}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:

.. math::
    (\text{base}^{\text{start}},
    \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
    \ldots,
    \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
    \text{base}^{\text{end}})
"""
    + """

From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.

Args:
    start (float): the starting value for the set of points
    end (float): the ending value for the set of points
    steps (int): size of the constructed tensor
    base (float, optional): base of the logarithm function. Default: ``10.0``.

Keyword arguments:
    {out}
    dtype (torch.dtype, optional): the data type to perform the computation in.
        Default: if None, uses the global default dtype (see torch.get_default_dtype())
        when both :attr:`start` and :attr:`end` are real,
        and corresponding complex dtype when either is complex.
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.logspace(start=-10, end=10, steps=5)
    tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
    >>> torch.logspace(start=0.1, end=1.0, steps=5)
    tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
    >>> torch.logspace(start=0.1, end=1.0, steps=1)
    tensor([1.2589])
    >>> torch.logspace(start=2, end=2, steps=1, base=2)
    tensor([4.])
""".format(
    **factory_common_args
),
)

add_docstr(
    torch.logsumexp,
    r"""
logsumexp(input, dim, keepdim=False, *, out=None)

Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.

For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is

.. math::
    \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})

{keepdim_details}

Args:
    {input}
    {opt_dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(3, 3)
    >>> torch.logsumexp(a, 1)
    tensor([1.4907, 1.0593, 1.5696])
    >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
    tensor(1.6859e-07)
""".format(
    **multi_dim_common
),
)
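
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# A deterministic check of the identity logsumexp(log(x)) == log(sum(x)),
# here log(1 + 2 + 3) = log(6), which is approximately 1.7918:
#     >>> torch.logsumexp(torch.log(torch.tensor([1., 2., 3.])), dim=0)
#     tensor(1.7918)
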
add_docstr(
    torch.lt,
    r"""
lt(input, other, *, out=None) -> Tensor

Computes :math:`\text{input} < \text{other}` element-wise.
"""
    + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere

Example::

    >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[False, False], [True, False]])
""".format(
    **common_args
),
)

add_docstr(
    torch.lu_unpack,
    r"""
lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)

Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.

.. seealso::

    :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
    than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.

Args:
    LU_data (Tensor): the packed LU factorization data
    LU_pivots (Tensor): the packed LU factorization pivots
    unpack_data (bool): flag indicating if the data should be unpacked.
        If ``False``, then the returned ``L`` and ``U`` are empty tensors.
        Default: ``True``
    unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
        If ``False``, then the returned ``P`` is an empty tensor.
        Default: ``True``

Keyword args:
    out (tuple, optional): output tuple of three tensors. Ignored if `None`.

Returns:
    A namedtuple ``(P, L, U)``

Examples::

    >>> A = torch.randn(2, 3, 3)
    >>> LU, pivots = torch.linalg.lu_factor(A)
    >>> P, L, U = torch.lu_unpack(LU, pivots)
    >>> # We can recover A from the factorization
    >>> A_ = P @ L @ U
    >>> torch.allclose(A, A_)
    True

    >>> # LU factorization of a rectangular matrix:
    >>> A = torch.randn(2, 3, 2)
    >>> LU, pivots = torch.linalg.lu_factor(A)
    >>> P, L, U = torch.lu_unpack(LU, pivots)
    >>> # P, L, U are the same as returned by linalg.lu
    >>> P_, L_, U_ = torch.linalg.lu(A)
    >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
    True
""".format(
    **common_args
),
)

add_docstr(
    torch.less,
    r"""
less(input, other, *, out=None) -> Tensor

Alias for :func:`torch.lt`.
""",
)

add_docstr(
    torch.lu_solve,
    r"""
lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor

Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
LU factorization of A from :func:`~linalg.lu_factor`.

This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.

.. warning::

    :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
    :func:`torch.lu_solve` will be removed in a future PyTorch release.
    ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with

    .. code:: python

        X = linalg.lu_solve(LU, pivots, B)

Arguments:
    b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
        is zero or more batch dimensions.
    LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
        where :math:`*` is zero or more batch dimensions.
    LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
        where :math:`*` is zero or more batch dimensions.
        The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
        :attr:`LU_data`.

Keyword args:
    {out}

Example::

    >>> A = torch.randn(2, 3, 3)
    >>> b = torch.randn(2, 3, 1)
    >>> LU, pivots = torch.linalg.lu_factor(A)
    >>> x = torch.lu_solve(b, LU, pivots)
    >>> torch.dist(A @ x, b)
    tensor(1.00000e-07 *
           2.8312)
""".format(
    **common_args
),
)

add_docstr(
    torch.masked_select,
    r"""
masked_select(input, mask, *, out=None) -> Tensor

Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
the boolean mask :attr:`mask` which is a `BoolTensor`.

The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.

.. note:: The returned tensor does **not** use the same storage
    as the original tensor

Args:
    {input}
    mask (BoolTensor): the tensor containing the binary mask to index with

Keyword args:
    {out}

Example::

    >>> x = torch.randn(3, 4)
    >>> x
    tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
            [-1.2035, 1.2252, 0.5002, 0.6248],
            [ 0.1307, -2.0608, 0.1244, 2.0139]])
    >>> mask = x.ge(0.5)
    >>> mask
    tensor([[False, False, False, False],
            [False, True, True, True],
            [False, False, False, True]])
    >>> torch.masked_select(x, mask)
    tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
""".format(
    **common_args
),
)

add_docstr(
    torch.matrix_power,
    r"""
matrix_power(input, n, *, out=None) -> Tensor

Alias for :func:`torch.linalg.matrix_power`.
""",
)

add_docstr(
    torch.matrix_exp,
    r"""
matrix_exp(A) -> Tensor

Alias for :func:`torch.linalg.matrix_exp`.
""",
)

add_docstr(
    torch.max,
    r"""
max(input) -> Tensor

Returns the maximum value of all elements in the ``input`` tensor.

.. warning::
    This function produces deterministic (sub)gradients unlike ``max(dim=0)``

Args:
    {input}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 0.6763, 0.7445, -2.2369]])
    >>> torch.max(a)
    tensor(0.7445)

.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
   :noindex:

Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each maximum value found
(argmax).

If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.

.. note:: If there are multiple maximal values in a reduced row then
    the indices of the first maximal value are returned.

Args:
    {input}
    {dim}
    {keepdim} Default: ``False``.

Keyword args:
    out (tuple, optional): the result tuple of two output tensors (max, max_indices)

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
            [ 1.1949, -1.1127, -2.2379, -0.6702],
            [ 1.5717, -0.9207, 0.1297, -1.8768],
            [-0.6172, 1.0036, -0.6060, -0.2432]])
    >>> torch.max(a, 1)
    torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))

.. function:: max(input, other, *, out=None) -> Tensor
   :noindex:

See :func:`torch.maximum`.
""".format(
    **single_dim_common
),
)

add_docstr(
    torch.maximum,
    r"""
maximum(input, other, *, out=None) -> Tensor

Computes the element-wise maximum of :attr:`input` and :attr:`other`.

.. note::
    If one of the elements being compared is a NaN, then that element is returned.
    :func:`maximum` is not supported for tensors with complex dtypes.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor((1, 2, -1))
    >>> b = torch.tensor((3, 0, 4))
    >>> torch.maximum(a, b)
    tensor([3, 2, 4])
""".format(
    **common_args
),
)

add_docstr(
    torch.fmax,
    r"""
fmax(input, other, *, out=None) -> Tensor

Computes the element-wise maximum of :attr:`input` and :attr:`other`.

This is like :func:`torch.maximum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
Only if both elements are NaN is NaN propagated.

This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
    >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
    >>> torch.fmax(a, b)
    tensor([9.7000, 0.5000, 3.1000, nan])
""".format(
    **common_args
),
)

add_docstr(
    torch.amax,
    r"""
amax(input, dim, keepdim=False, *, out=None) -> Tensor

Returns the maximum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.

.. note::
    The difference between ``max``/``min`` and ``amax``/``amin`` is:
        - ``amax``/``amin`` supports reducing on multiple dimensions,
        - ``amax``/``amin`` does not return indices,
        - ``amax``/``amin`` evenly distributes gradient between equal values,
          while ``max(dim)``/``min(dim)`` propagates gradient only to a single
          index in the source tensor.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
            [-0.7158, 1.1775, 2.0992, 0.4817],
            [-0.0053, 0.0164, -1.3738, -0.0507],
            [ 1.9700, 1.1106, -1.0318, -1.0816]])
    >>> torch.amax(a, 1)
    tensor([1.4878, 2.0992, 0.0164, 1.9700])
""".format(
    **multi_dim_common
),
)
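
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# Unlike ``max``, ``amax`` can reduce several dimensions at once via a tuple ``dim``:
#     >>> torch.amax(torch.tensor([[1., 2.], [3., 4.]]), dim=(0, 1))
#     tensor(4.)
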
add_docstr(
    torch.argmax,
    r"""
argmax(input) -> LongTensor

Returns the indices of the maximum value of all elements in the :attr:`input` tensor.

This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.

.. note:: If there are multiple maximal values then the indices of the first maximal value are returned.

Args:
    {input}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
            [-0.7401, -0.8805, -0.3402, -1.1936],
            [ 0.4907, -1.3948, -1.0691, -0.3132],
            [-1.6092, 0.5419, -0.2993, 0.3195]])
    >>> torch.argmax(a)
    tensor(0)

.. function:: argmax(input, dim, keepdim=False) -> LongTensor
   :noindex:

Returns the indices of the maximum values of a tensor across a dimension.

This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.

Args:
    {input}
    {dim} If ``None``, the argmax of the flattened input is returned.
    {keepdim} Ignored if ``dim=None``.

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
            [-0.7401, -0.8805, -0.3402, -1.1936],
            [ 0.4907, -1.3948, -1.0691, -0.3132],
            [-1.6092, 0.5419, -0.2993, 0.3195]])
    >>> torch.argmax(a, dim=1)
    tensor([ 0, 2, 0, 1])
""".format(
    **single_dim_common
),
)

add_docstr(
    torch.argwhere,
    r"""
argwhere(input) -> Tensor

Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).

If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.

.. note::
    This function is similar to NumPy's `argwhere`.

    When :attr:`input` is on CUDA, this function causes host-device synchronization.

Args:
    {input}

Example::

    >>> t = torch.tensor([1, 0, 1])
    >>> torch.argwhere(t)
    tensor([[0],
            [2]])
    >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
    >>> torch.argwhere(t)
    tensor([[0, 0],
            [0, 2],
            [1, 1],
            [1, 2]])
""".format(
    **common_args
),
)

add_docstr(
    torch.mean,
    r"""
mean(input, *, dtype=None) -> Tensor

Returns the mean value of all elements in the :attr:`input` tensor.

Args:
    {input}

Keyword args:
    {dtype}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 0.2294, -0.5481, 1.3288]])
    >>> torch.mean(a)
    tensor(0.3367)

.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
   :noindex:

Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {dtype}
    {out}

.. seealso::

    :func:`torch.nanmean` computes the mean value of `non-NaN` elements.

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
            [-0.9644, 1.0131, -0.6549, -1.4279],
            [-0.2951, -1.3350, -0.7694, 0.5600],
            [ 1.0842, -0.9580, 0.3623, 0.2343]])
    >>> torch.mean(a, 1)
    tensor([-0.0163, -0.5085, -0.4599, 0.1807])
    >>> torch.mean(a, 1, True)
    tensor([[-0.0163],
            [-0.5085],
            [-0.4599],
            [ 0.1807]])
""".format(
    **multi_dim_common
),
)
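
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# A deterministic example of reducing over a chosen dimension (column means):
#     >>> torch.mean(torch.tensor([[1., 2.], [3., 4.]]), dim=0)
#     tensor([2., 3.])
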
add_docstr(
    torch.nanmean,
    r"""
nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor

Computes the mean of all `non-NaN` elements along the specified dimensions.

This function is identical to :func:`torch.mean` when there are no `NaN` values
in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).

{keepdim_details}

Args:
    {input}
    {opt_dim}
    {keepdim}

Keyword args:
    {dtype}
    {out}

.. seealso::

    :func:`torch.mean` computes the mean value, propagating `NaN`.

Example::

    >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
    >>> x.mean()
    tensor(nan)
    >>> x.nanmean()
    tensor(1.8000)
    >>> x.mean(dim=0)
    tensor([ nan, 1.5000, 2.5000])
    >>> x.nanmean(dim=0)
    tensor([1.0000, 1.5000, 2.5000])

    >>> # If all elements in the reduced dimensions are NaN then the result is NaN
    >>> torch.tensor([torch.nan]).nanmean()
    tensor(nan)
""".format(
    **multi_dim_common
),
)

add_docstr(
    torch.median,
    r"""
median(input) -> Tensor

Returns the median of the values in :attr:`input`.

.. note::
    The median is not unique for :attr:`input` tensors with an even number
    of elements. In this case the lower of the two medians is returned. To
    compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.

.. warning::
    This function produces deterministic (sub)gradients unlike ``median(dim=0)``

Args:
    {input}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 1.5219, -1.5212, 0.2202]])
    >>> torch.median(a)
    tensor(0.2202)

.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
   :noindex:

Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.

By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.

.. note::
    The median is not unique for :attr:`input` tensors with an even number
    of elements in the dimension :attr:`dim`. In this case the lower of the
    two medians is returned. To compute the mean of both medians in
    :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.

.. warning::
    ``indices`` does not necessarily contain the first occurrence of each
    median value found, unless it is unique.
    The exact implementation details are device-specific.
    Do not expect the same result when run on CPU and GPU in general.
    For the same reason do not expect the gradients to be deterministic.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
        tensor, which must have dtype long, with their indices in the dimension
        :attr:`dim` of :attr:`input`.

Example::

    >>> a = torch.randn(4, 5)
    >>> a
    tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
            [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
            [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
            [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
    >>> torch.median(a, 1)
    torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
""".format(
    **single_dim_common
),
)
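
# Editor's note: an illustrative sketch, not part of the upstream docstring.
# For an even number of elements the *lower* of the two middle values is
# returned, not their mean:
#     >>> torch.median(torch.tensor([1., 2., 3., 4.]))
#     tensor(2.)
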
  5553. add_docstr(
  5554. torch.nanmedian,
  5555. r"""
  5556. nanmedian(input) -> Tensor
  5557. Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
  5558. This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
  5559. When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
  5560. while this function will return the median of the non-``NaN`` elements in :attr:`input`.
  5561. If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
  5562. Args:
  5563. {input}
  5564. Example::
  5565. >>> a = torch.tensor([1, float('nan'), 3, 2])
  5566. >>> a.median()
  5567. tensor(nan)
  5568. >>> a.nanmedian()
  5569. tensor(2.)
  5570. .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
  5571. :noindex:
  5572. Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
  5573. in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
  5574. found in the dimension :attr:`dim`.
  5575. This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
  5576. one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
  5577. median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
  5578. Args:
  5579. {input}
  5580. {dim}
  5581. {keepdim}
  5582. Keyword args:
  5583. out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
  5584. tensor, which must have dtype long, with their indices in the dimension
  5585. :attr:`dim` of :attr:`input`.
  5586. Example::
  5587. >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
  5588. >>> a
  5589. tensor([[2., 3., 1.],
  5590. [nan, 1., nan]])
  5591. >>> a.median(0)
  5592. torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
  5593. >>> a.nanmedian(0)
  5594. torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
  5595. """.format(
  5596. **single_dim_common
  5597. ),
  5598. )
add_docstr(
    torch.quantile,
    r"""
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.

To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
:attr:`interpolation` method as follows:

- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.

If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.

.. note::
    By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.

Args:
    {input}
    q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
    {dim}
    {keepdim}

Keyword arguments:
    interpolation (str): interpolation method to use when the desired quantile lies between two data points.
        Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
        Default is ``linear``.
    {out}

Example::

    >>> a = torch.randn(2, 3)
    >>> a
    tensor([[ 0.0795, -1.2117,  0.9765],
            [ 1.1707,  0.6706,  0.4884]])
    >>> q = torch.tensor([0.25, 0.5, 0.75])
    >>> torch.quantile(a, q, dim=1, keepdim=True)
    tensor([[[-0.5661],
             [ 0.5795]],

            [[ 0.0795],
             [ 0.6706]],

            [[ 0.5280],
             [ 0.9206]]])
    >>> torch.quantile(a, q, dim=1, keepdim=True).shape
    torch.Size([3, 2, 1])
    >>> a = torch.arange(4.)
    >>> a
    tensor([0., 1., 2., 3.])
    >>> torch.quantile(a, 0.6, interpolation='linear')
    tensor(1.8000)
    >>> torch.quantile(a, 0.6, interpolation='lower')
    tensor(1.)
    >>> torch.quantile(a, 0.6, interpolation='higher')
    tensor(2.)
    >>> torch.quantile(a, 0.6, interpolation='midpoint')
    tensor(1.5000)
    >>> torch.quantile(a, 0.6, interpolation='nearest')
    tensor(2.)
    >>> torch.quantile(a, 0.4, interpolation='nearest')
    tensor(1.)
""".format(**single_dim_common),
)
add_docstr(
    torch.nanquantile,
    r"""
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
not exist. If all values in a reduced row are ``NaN`` then the quantiles for
that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.

Args:
    {input}
    q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
    {dim}
    {keepdim}

Keyword arguments:
    interpolation (str): interpolation method to use when the desired quantile lies between two data points.
        Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
        Default is ``linear``.
    {out}

Example::

    >>> t = torch.tensor([float('nan'), 1, 2])
    >>> t.quantile(0.5)
    tensor(nan)
    >>> t.nanquantile(0.5)
    tensor(1.5000)
    >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
    >>> t
    tensor([[nan, nan],
            [1., 2.]])
    >>> t.nanquantile(0.5, dim=0)
    tensor([1., 2.])
    >>> t.nanquantile(0.5, dim=1)
    tensor([   nan, 1.5000])
""".format(**single_dim_common),
)
add_docstr(
    torch.min,
    r"""
min(input) -> Tensor

Returns the minimum value of all elements in the :attr:`input` tensor.

.. warning::
    This function produces deterministic (sub)gradients unlike ``min(dim=0)``.

Args:
    {input}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 0.6750,  1.0857,  1.7197]])
    >>> torch.min(a)
    tensor(0.6750)

.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
   :noindex:

Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
(argmin).

If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.

.. note:: If there are multiple minimal values in a reduced row then
          the indices of the first minimal value are returned.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out (tuple, optional): the tuple of two output tensors (min, min_indices)

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
            [-1.4644, -0.2635, -0.3651,  0.6134],
            [ 0.2457,  0.0384,  1.0128,  0.7015],
            [-0.1153,  2.9849,  2.1458,  0.5788]])
    >>> torch.min(a, 1)
    torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

.. function:: min(input, other, *, out=None) -> Tensor
   :noindex:

See :func:`torch.minimum`.
""".format(**single_dim_common),
)
add_docstr(
    torch.minimum,
    r"""
minimum(input, other, *, out=None) -> Tensor

Computes the element-wise minimum of :attr:`input` and :attr:`other`.

.. note::
    If one of the elements being compared is a NaN, then that element is returned.
    :func:`minimum` is not supported for tensors with complex dtypes.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor((1, 2, -1))
    >>> b = torch.tensor((3, 0, 4))
    >>> torch.minimum(a, b)
    tensor([ 1,  0, -1])
""".format(**common_args),
)
add_docstr(
    torch.fmin,
    r"""
fmin(input, other, *, out=None) -> Tensor

Computes the element-wise minimum of :attr:`input` and :attr:`other`.

This is like :func:`torch.minimum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
Only if both elements are NaN is NaN propagated.

This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
    >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
    >>> torch.fmin(a, b)
    tensor([-9.3000, 0.1000, 2.1000, nan])
""".format(**common_args),
)
add_docstr(
    torch.amin,
    r"""
amin(input, dim, keepdim=False, *, out=None) -> Tensor

Returns the minimum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.

.. note::
    The difference between ``max``/``min`` and ``amax``/``amin`` is:
        - ``amax``/``amin`` supports reducing on multiple dimensions,
        - ``amax``/``amin`` does not return indices,
        - ``amax``/``amin`` evenly distributes gradient between equal values,
          while ``max(dim)``/``min(dim)`` propagates gradient only to a single
          index in the source tensor.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.6451, -0.4866,  0.2987, -1.3312],
            [-0.5744,  1.2980,  1.8397, -0.2713],
            [ 0.9128,  0.9214, -1.7268, -0.2995],
            [ 0.9023,  0.4853,  0.9075, -1.6165]])
    >>> torch.amin(a, 1)
    tensor([-1.3312, -0.5744, -1.7268, -1.6165])
""".format(**multi_dim_common),
)
add_docstr(
    torch.aminmax,
    r"""
aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)

Computes the minimum and maximum values of the :attr:`input` tensor.

Args:
    input (Tensor):
        The input tensor

Keyword Args:
    dim (Optional[int]):
        The dimension along which to compute the values. If `None`,
        computes the values over the entire :attr:`input` tensor.
        Default is `None`.
    keepdim (bool):
        If `True`, the reduced dimensions will be kept in the output
        tensor as dimensions with size 1 for broadcasting, otherwise
        they will be removed, as if calling :func:`torch.squeeze`.
        Default is `False`.
    out (Optional[Tuple[Tensor, Tensor]]):
        Optional tensors on which to write the result. Must have the same
        shape and dtype as the expected output.
        Default is `None`.

Returns:
    A named tuple `(min, max)` containing the minimum and maximum values.

Raises:
    RuntimeError
        If any of the dimensions to compute the values over has size 0.

.. note::
    NaN values are propagated to the output if at least one value is NaN.

.. seealso::
    :func:`torch.amin` computes just the minimum value
    :func:`torch.amax` computes just the maximum value

Example::

    >>> torch.aminmax(torch.tensor([1, -3, 5]))
    torch.return_types.aminmax(
    min=tensor(-3),
    max=tensor(5))

    >>> # aminmax propagates NaNs
    >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
    torch.return_types.aminmax(
    min=tensor(nan),
    max=tensor(nan))

    >>> t = torch.arange(10).view(2, 5)
    >>> t
    tensor([[0, 1, 2, 3, 4],
            [5, 6, 7, 8, 9]])
    >>> t.aminmax(dim=0, keepdim=True)
    torch.return_types.aminmax(
    min=tensor([[0, 1, 2, 3, 4]]),
    max=tensor([[5, 6, 7, 8, 9]]))
""",
)
add_docstr(
    torch.argmin,
    r"""
argmin(input, dim=None, keepdim=False) -> LongTensor

Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.

This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.

.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.

Args:
    {input}
    {dim} If ``None``, the argmin of the flattened input is returned.
    {keepdim}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.1139,  0.2254, -0.1381,  0.3687],
            [ 1.0100, -1.1975, -0.0102, -0.4732],
            [-0.9240,  0.1207, -0.7506, -1.0213],
            [ 1.7809, -1.2960,  0.9384,  0.1438]])
    >>> torch.argmin(a)
    tensor(13)
    >>> torch.argmin(a, dim=1)
    tensor([ 2,  1,  3,  1])
    >>> torch.argmin(a, dim=1, keepdim=True)
    tensor([[2],
            [1],
            [3],
            [1]])
""".format(**single_dim_common),
)
add_docstr(
    torch.mm,
    r"""
mm(input, mat2, *, out=None) -> Tensor

Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.

If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
          For broadcasting matrix products, see :func:`torch.matmul`.

Supports strided and sparse 2-D tensors as inputs, autograd with
respect to strided inputs.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
If :attr:`out` is provided, its layout will be used. Otherwise, the result
layout will be deduced from that of :attr:`input`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): the first matrix to be matrix multiplied
    mat2 (Tensor): the second matrix to be matrix multiplied

Keyword args:
    {out}

Example::

    >>> mat1 = torch.randn(2, 3)
    >>> mat2 = torch.randn(3, 3)
    >>> torch.mm(mat1, mat2)
    tensor([[ 0.4851,  0.5037, -0.3633],
            [-0.0760, -3.6705,  2.4784]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)
add_docstr(
    torch.hspmm,
    r"""
hspmm(mat1, mat2, *, out=None) -> Tensor

Performs a matrix multiplication of a :ref:`sparse COO matrix
<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
<sparse-hybrid-coo-docs>`.

Args:
    mat1 (Tensor): the first sparse matrix to be matrix multiplied
    mat2 (Tensor): the second strided matrix to be matrix multiplied

Keyword args:
    {out}
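
Example (an illustrative sketch: the inputs below are random, so only the
shape check at the end is reproducible)::

    >>> mat1 = torch.randn(3, 4).to_sparse()
    >>> mat2 = torch.randn(4, 2)
    >>> out = torch.hspmm(mat1, mat2)
    >>> out.shape
    torch.Size([3, 2])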
  5955. """.format(
  5956. **common_args
  5957. ),
  5958. )
add_docstr(
    torch.matmul,
    r"""
matmul(input, other, *, out=None) -> Tensor

Matrix product of two tensors.

The behavior depends on the dimensionality of the tensors as follows:

- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is 2-dimensional,
  a 1 is prepended to its dimension for the purpose of the matrix multiply.
  After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is 1-dimensional,
  the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
  N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
  argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
  batched matrix multiply and removed after. If the second argument is 1-dimensional, a
  1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
  The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
  must be broadcastable). For example, if :attr:`input` is a
  :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
  tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.

  Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
  are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
  :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
  tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
  matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular the
matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions
as :func:`torch.mm`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

.. note::
    The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.

Arguments:
    input (Tensor): the first tensor to be multiplied
    other (Tensor): the second tensor to be multiplied

Keyword args:
    {out}

Example::

    >>> # vector x vector
    >>> tensor1 = torch.randn(3)
    >>> tensor2 = torch.randn(3)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([])
    >>> # matrix x vector
    >>> tensor1 = torch.randn(3, 4)
    >>> tensor2 = torch.randn(4)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([3])
    >>> # batched matrix x broadcasted vector
    >>> tensor1 = torch.randn(10, 3, 4)
    >>> tensor2 = torch.randn(4)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 3])
    >>> # batched matrix x batched matrix
    >>> tensor1 = torch.randn(10, 3, 4)
    >>> tensor2 = torch.randn(10, 4, 5)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 3, 5])
    >>> # batched matrix x broadcasted matrix
    >>> tensor1 = torch.randn(10, 3, 4)
    >>> tensor2 = torch.randn(4, 5)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)
add_docstr(
    torch.mode,
    r"""
mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.

By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.

.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out (tuple, optional): the result tuple of two output tensors (values, indices)

Example::

    >>> a = torch.randint(10, (5,))
    >>> a
    tensor([6, 5, 1, 0, 2])
    >>> b = a + (torch.randn(50, 1) * 5).long()
    >>> torch.mode(b, 0)
    torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))
""".format(**single_dim_common),
)
add_docstr(
    torch.mul,
    r"""
mul(input, other, *, out=None) -> Tensor

Multiplies :attr:`input` by :attr:`other`.

.. math::
    \text{out}_i = \text{input}_i \times \text{other}_i
"""
    + r"""

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

Args:
    {input}
    other (Tensor or Number): the tensor or number to multiply input by.

Keyword args:
    {out}

Examples::

    >>> a = torch.randn(3)
    >>> a
    tensor([ 0.2015, -0.4255,  2.6087])
    >>> torch.mul(a, 100)
    tensor([  20.1494,  -42.5491,  260.8663])
    >>> b = torch.randn(4, 1)
    >>> b
    tensor([[ 1.1207],
            [-0.3137],
            [ 0.0700],
            [ 0.8378]])
    >>> c = torch.randn(1, 4)
    >>> c
    tensor([[ 0.5146,  0.1216, -0.5244,  2.2382]])
    >>> torch.mul(b, c)
    tensor([[ 0.5767,  0.1363, -0.5877,  2.5083],
            [-0.1614, -0.0382,  0.1645, -0.7021],
            [ 0.0360,  0.0085, -0.0367,  0.1567],
            [ 0.4312,  0.1019, -0.4394,  1.8753]])
""".format(**common_args),
)
add_docstr(
    torch.multiply,
    r"""
multiply(input, other, *, out=None)

Alias for :func:`torch.mul`.
""",
)
add_docstr(
    torch.multinomial,
    r"""
multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor

Returns a tensor where each row contains :attr:`num_samples` indices sampled
from the multinomial probability distribution located in the corresponding row
of tensor :attr:`input`.

.. note::
    The rows of :attr:`input` do not need to sum to one (in which case we use
    the values as weights), but must be non-negative, finite and have
    a non-zero sum.

Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).

If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.

If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
:math:`(m \times \text{{num\_samples}})`.

If replacement is ``True``, samples are drawn with replacement.

If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.

.. note::
    When drawn without replacement, :attr:`num_samples` must be lower than the
    number of non-zero elements in :attr:`input` (or the min number of non-zero
    elements in each row of :attr:`input` if it is a matrix).

Args:
    input (Tensor): the input tensor containing probabilities
    num_samples (int): number of samples to draw
    replacement (bool, optional): whether to draw with replacement or not

Keyword args:
    {generator}
    {out}

Example::

    >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
    >>> torch.multinomial(weights, 2)
    tensor([1, 2])
    >>> torch.multinomial(weights, 4) # ERROR!
    RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
    not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
    >>> torch.multinomial(weights, 4, replacement=True)
    tensor([ 2,  1,  1,  1])
""".format(**common_args),
)
add_docstr(
    torch.mv,
    r"""
mv(input, vec, *, out=None) -> Tensor

Performs a matrix-vector product of the matrix :attr:`input` and the vector
:attr:`vec`.

If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

Args:
    input (Tensor): matrix to be multiplied
    vec (Tensor): vector to be multiplied

Keyword args:
    {out}

Example::

    >>> mat = torch.randn(2, 3)
    >>> vec = torch.randn(3)
    >>> torch.mv(mat, vec)
    tensor([ 1.0404, -0.6361])
""".format(**common_args),
)
add_docstr(
    torch.mvlgamma,
    r"""
mvlgamma(input, p, *, out=None) -> Tensor

Alias for :func:`torch.special.multigammaln`.
""",
)
add_docstr(
    torch.movedim,
    r"""
movedim(input, source, destination) -> Tensor

Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
to the position(s) in :attr:`destination`.

Other dimensions of :attr:`input` that are not explicitly moved remain in
their original order and appear at the positions not specified in :attr:`destination`.

Args:
    {input}
    source (int or tuple of ints): Original positions of the dims to move. These must be unique.
    destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.

Examples::

    >>> t = torch.randn(3,2,1)
    >>> t
    tensor([[[-0.3362],
             [-0.8437]],

            [[-0.9627],
             [ 0.1727]],

            [[ 0.5173],
             [-0.1398]]])
    >>> torch.movedim(t, 1, 0).shape
    torch.Size([2, 3, 1])
    >>> torch.movedim(t, 1, 0)
    tensor([[[-0.3362],
             [-0.9627],
             [ 0.5173]],

            [[-0.8437],
             [ 0.1727],
             [-0.1398]]])
    >>> torch.movedim(t, (1, 2), (0, 1)).shape
    torch.Size([2, 1, 3])
    >>> torch.movedim(t, (1, 2), (0, 1))
    tensor([[[-0.3362, -0.9627,  0.5173]],

            [[-0.8437,  0.1727, -0.1398]]])
""".format(**common_args),
)
add_docstr(
    torch.moveaxis,
    r"""
moveaxis(input, source, destination) -> Tensor

Alias for :func:`torch.movedim`.

This function is equivalent to NumPy's moveaxis function.

Examples::

    >>> t = torch.randn(3,2,1)
    >>> t
    tensor([[[-0.3362],
             [-0.8437]],

            [[-0.9627],
             [ 0.1727]],

            [[ 0.5173],
             [-0.1398]]])
    >>> torch.moveaxis(t, 1, 0).shape
    torch.Size([2, 3, 1])
    >>> torch.moveaxis(t, 1, 0)
    tensor([[[-0.3362],
             [-0.9627],
             [ 0.5173]],

            [[-0.8437],
             [ 0.1727],
             [-0.1398]]])
    >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
    torch.Size([2, 1, 3])
    >>> torch.moveaxis(t, (1, 2), (0, 1))
    tensor([[[-0.3362, -0.9627,  0.5173]],

            [[-0.8437,  0.1727, -0.1398]]])
""".format(**common_args),
)
add_docstr(
    torch.swapdims,
    r"""
swapdims(input, dim0, dim1) -> Tensor

Alias for :func:`torch.transpose`.

This function is equivalent to NumPy's swapaxes function.

Examples::

    >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
    >>> x
    tensor([[[0, 1],
             [2, 3]],

            [[4, 5],
             [6, 7]]])
    >>> torch.swapdims(x, 0, 1)
    tensor([[[0, 1],
             [4, 5]],

            [[2, 3],
             [6, 7]]])
    >>> torch.swapdims(x, 0, 2)
    tensor([[[0, 4],
             [2, 6]],

            [[1, 5],
             [3, 7]]])
""".format(**common_args),
)
add_docstr(
    torch.swapaxes,
    r"""
swapaxes(input, axis0, axis1) -> Tensor

Alias for :func:`torch.transpose`.

This function is equivalent to NumPy's swapaxes function.

Examples::

    >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
    >>> x
    tensor([[[0, 1],
             [2, 3]],

            [[4, 5],
             [6, 7]]])
    >>> torch.swapaxes(x, 0, 1)
    tensor([[[0, 1],
             [4, 5]],

            [[2, 3],
             [6, 7]]])
    >>> torch.swapaxes(x, 0, 2)
    tensor([[[0, 4],
             [2, 6]],

            [[1, 5],
             [3, 7]]])
""".format(**common_args),
)
add_docstr(
    torch.narrow,
    r"""
narrow(input, dim, start, length) -> Tensor

Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
dimension :attr:`dim` spans from :attr:`start` to ``start + length``. The
returned tensor and :attr:`input` tensor share the same underlying storage.

Args:
    input (Tensor): the tensor to narrow
    dim (int): the dimension along which to narrow
    start (int or Tensor): index of the element to start the narrowed dimension
        from. Can be negative, which means indexing from the end of `dim`. If
        `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
    length (int): length of the narrowed dimension, must be weakly positive

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> torch.narrow(x, 0, 0, 2)
    tensor([[ 1,  2,  3],
            [ 4,  5,  6]])
    >>> torch.narrow(x, 1, 1, 2)
    tensor([[ 2,  3],
            [ 5,  6],
            [ 8,  9]])
    >>> torch.narrow(x, -1, torch.tensor(-1), 1)
    tensor([[3],
            [6],
            [9]])
""",
)
add_docstr(
    torch.narrow_copy,
    r"""
narrow_copy(input, dim, start, length, *, out=None) -> Tensor

Same as :meth:`Tensor.narrow` except this returns a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method.

Args:
    input (Tensor): the tensor to narrow
    dim (int): the dimension along which to narrow
    start (int): index of the element to start the narrowed dimension from. Can
        be negative, which means indexing from the end of `dim`
    length (int): length of the narrowed dimension, must be weakly positive

Keyword args:
    {out}

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> torch.narrow_copy(x, 0, 0, 2)
    tensor([[ 1,  2,  3],
            [ 4,  5,  6]])
    >>> torch.narrow_copy(x, 1, 1, 2)
    tensor([[ 2,  3],
            [ 5,  6],
            [ 8,  9]])
    >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
    >>> torch.narrow_copy(s, 0, 0, 1)
    tensor(indices=tensor([[0, 0],
                           [0, 1]]),
           values=tensor([[[0, 1],
                           [2, 3]],

                          [[4, 5],
                           [6, 7]]]),
           size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)

.. seealso::
    :func:`torch.narrow` for a non-copy variant
""".format(**common_args),
)
add_docstr(
    torch.nan_to_num,
    r"""
nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor

Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
greatest finite value representable by :attr:`input`'s dtype, and negative infinity
is replaced with the least finite value representable by :attr:`input`'s dtype.

Args:
    {input}
    nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
    posinf (Number, optional): if a Number, the value to replace positive infinity values with.
        If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
        Default is None.
    neginf (Number, optional): if a Number, the value to replace negative infinity values with.
        If None, negative infinity values are replaced with the least finite value representable by :attr:`input`'s dtype.
        Default is None.

Keyword args:
    {out}

Example::

    >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
    >>> torch.nan_to_num(x)
    tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
    >>> torch.nan_to_num(x, nan=2.0)
    tensor([ 2.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
    >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
    tensor([ 2.0000e+00,  1.0000e+00, -3.4028e+38,  3.1400e+00])
""".format(**common_args),
)
add_docstr(
    torch.ne,
    r"""
ne(input, other, *, out=None) -> Tensor

Computes :math:`\text{input} \neq \text{other}` element-wise.
"""
    + r"""

The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere

Example::

    >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[False, True], [True, False]])
""".format(**common_args),
)
add_docstr(
    torch.not_equal,
    r"""
not_equal(input, other, *, out=None) -> Tensor

Alias for :func:`torch.ne`.
""",
)
add_docstr(
    torch.neg,
    r"""
neg(input, *, out=None) -> Tensor

Returns a new tensor with the negative of the elements of :attr:`input`.

.. math::
    \text{out} = -1 \times \text{input}
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(5)
    >>> a
    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
    >>> torch.neg(a)
    tensor([-0.0090,  0.2262,  0.0682,  0.2866, -0.3940])
""".format(**common_args),
)
add_docstr(
    torch.negative,
    r"""
negative(input, *, out=None) -> Tensor

Alias for :func:`torch.neg`.
""",
)
add_docstr(
    torch.nextafter,
    r"""
nextafter(input, other, *, out=None) -> Tensor

Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.

The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    input (Tensor): the first input tensor
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> eps = torch.finfo(torch.float32).eps
    >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
    tensor([True, True])
""".format(**common_args),
)
add_docstr(
    torch.nonzero,
    r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors

.. note::
    :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
    2-D tensor where each row is the index for a nonzero value.

    :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
    index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
    gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
    contains nonzero indices for a certain dimension.

    See below for more details on the two behaviors.

    When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
    host-device synchronization.

**When** :attr:`as_tuple` **is** ``False`` **(default)**:

Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).

If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.

**When** :attr:`as_tuple` **is** ``True``:

Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input`.

If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.

As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.

Args:
    {input}

Keyword args:
    out (LongTensor, optional): the output tensor containing indices

Returns:
    LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
    tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
    each dimension, containing the indices of each nonzero element along that
    dimension.

Example::

    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
    tensor([[ 0],
            [ 1],
            [ 2],
            [ 4]])
    >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
    ...                             [0.0, 0.4, 0.0, 0.0],
    ...                             [0.0, 0.0, 1.2, 0.0],
    ...                             [0.0, 0.0, 0.0,-0.4]]))
    tensor([[ 0,  0],
            [ 1,  1],
            [ 2,  2],
            [ 3,  3]])
    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
    (tensor([0, 1, 2, 4]),)
    >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
    ...                             [0.0, 0.4, 0.0, 0.0],
    ...                             [0.0, 0.0, 1.2, 0.0],
    ...                             [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
    (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
    >>> torch.nonzero(torch.tensor(5), as_tuple=True)
    (tensor([0]),)
""".format(**common_args),
)
add_docstr(
    torch.normal,
    r"""
normal(mean, std, *, generator=None, out=None) -> Tensor

Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.

The :attr:`mean` is a tensor with the mean of
each output element's normal distribution.

The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution.

The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.

.. note:: When the shapes do not match, the shape of :attr:`mean`
          is used as the shape for the returned output tensor

.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
          its device with the CPU.

Args:
    mean (Tensor): the tensor of per-element means
    std (Tensor): the tensor of per-element standard deviations

Keyword args:
    {generator}
    {out}

Example::

    >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
    tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
              8.0505,   8.1408,   9.0563,  10.0566])

.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
   :noindex:

Similar to the function above, but the means are shared among all drawn
elements.

Args:
    mean (float, optional): the mean for all distributions
    std (Tensor): the tensor of per-element standard deviations

Keyword args:
    {out}

Example::

    >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
    tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
   :noindex:

Similar to the function above, but the standard deviations are shared among
all drawn elements.

Args:
    mean (Tensor): the tensor of per-element means
    std (float, optional): the standard deviation for all distributions

Keyword args:
    out (Tensor, optional): the output tensor

Example::

    >>> torch.normal(mean=torch.arange(1., 6.))
    tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

.. function:: normal(mean, std, size, *, out=None) -> Tensor
   :noindex:

Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.

Args:
    mean (float): the mean for all distributions
    std (float): the standard deviation for all distributions
    size (int...): a sequence of integers defining the shape of the output tensor.

Keyword args:
    {out}

Example::

    >>> torch.normal(2, 3, size=(1, 4))
    tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
""".format(**common_args),
)
add_docstr(
    torch.numel,
    r"""
numel(input) -> int

Returns the total number of elements in the :attr:`input` tensor.

Args:
    {input}

Example::

    >>> a = torch.randn(1, 2, 3, 4, 5)
    >>> torch.numel(a)
    120
    >>> a = torch.zeros(4,4)
    >>> torch.numel(a)
    16
""".format(**common_args),
)
add_docstr(
    torch.ones,
    r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword arguments:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.ones(2, 3)
    tensor([[ 1.,  1.,  1.],
            [ 1.,  1.,  1.]])

    >>> torch.ones(5)
    tensor([ 1.,  1.,  1.,  1.,  1.])
""".format(**factory_common_args),
)
add_docstr(
    torch.ones_like,
    r"""
ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`. ``torch.ones_like(input)`` is equivalent to
``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

.. warning::
    As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
    the old ``torch.ones_like(input, out=output)`` is equivalent to
    ``torch.ones(input.size(), out=output)``.

Args:
    {input}

Keyword arguments:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}

Example::

    >>> input = torch.empty(2, 3)
    >>> torch.ones_like(input)
    tensor([[ 1.,  1.,  1.],
            [ 1.,  1.,  1.]])
""".format(**factory_like_common_args),
)
add_docstr(
    torch.orgqr,
    r"""
orgqr(input, tau) -> Tensor

Alias for :func:`torch.linalg.householder_product`.
""",
)
add_docstr(
    torch.ormqr,
    r"""
ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor

Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.

Multiplies an :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
where `Q` is represented using Householder reflectors `(input, tau)`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.

If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
It has size :math:`n \times n` otherwise.
If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.

Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.

.. seealso::
    :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
    from the QR decomposition.

.. note::
    This function supports backward but it is only fast when ``(input, tau)`` do not require gradients
    and/or ``tau.size(-1)`` is very small.

Args:
    input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
        and `mn` equals `m` or `n` depending on :attr:`left`.
    tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
    other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
    left (bool): controls the order of multiplication.
    transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.

Keyword args:
    out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
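
Example (an illustrative sketch: it forms the Householder representation
``(input, tau)`` with :func:`torch.geqrf` and applies `Q` to the identity, which
should recover the `Q` factor returned by :func:`torch.linalg.qr`)::

    >>> A = torch.randn(3, 3)
    >>> h, tau = torch.geqrf(A)
    >>> Q = torch.ormqr(h, tau, torch.eye(3))
    >>> torch.allclose(Q, torch.linalg.qr(A).Q)
    True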

.. _Representation of Orthogonal or Unitary Matrices:
    https://www.netlib.org/lapack/lug/node128.html
""",
)
add_docstr(
    torch.permute,
    r"""
permute(input, dims) -> Tensor

Returns a view of the original tensor :attr:`input` with its dimensions permuted.

Args:
    {input}
    dims (tuple of int): The desired ordering of dimensions

Example::

    >>> x = torch.randn(2, 3, 5)
    >>> x.size()
    torch.Size([2, 3, 5])
    >>> torch.permute(x, (2, 0, 1)).size()
    torch.Size([5, 2, 3])
""".format(**common_args),
)
add_docstr(
    torch.poisson,
    r"""
poisson(input, generator=None) -> Tensor

Returns a tensor of the same size as :attr:`input` with each element
sampled from a Poisson distribution with rate parameter given by the corresponding
element in :attr:`input` i.e.,

.. math::
    \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)

:attr:`input` must be non-negative.

Args:
    input (Tensor): the input tensor containing the rates of the Poisson distribution

Keyword args:
    {generator}

Example::

    >>> rates = torch.rand(4, 4) * 5  # rate parameter between 0 and 5
    >>> torch.poisson(rates)
    tensor([[9., 1., 3., 5.],
            [8., 6., 6., 0.],
            [0., 4., 5., 3.],
            [2., 1., 4., 2.]])
""".format(**common_args),
)
add_docstr(
    torch.polygamma,
    r"""
polygamma(n, input, *, out=None) -> Tensor

Alias for :func:`torch.special.polygamma`.
""",
)
add_docstr(
    torch.positive,
    r"""
positive(input) -> Tensor

Returns :attr:`input`.
Throws a runtime error if :attr:`input` is a bool tensor.
"""
    + r"""

Args:
    {input}

Example::

    >>> t = torch.randn(5)
    >>> t
    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
    >>> torch.positive(t)
    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
""".format(**common_args),
)
add_docstr(
    torch.pow,
    r"""
pow(input, exponent, *, out=None) -> Tensor

Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.

:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.

When :attr:`exponent` is a scalar value, the operation applied is:

.. math::
    \text{out}_i = x_i ^ \text{exponent}

When :attr:`exponent` is a tensor, the operation applied is:

.. math::
    \text{out}_i = x_i ^ {\text{exponent}_i}
"""
    + r"""
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.

Args:
    {input}
    exponent (float or tensor): the exponent value

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.4331,  1.2475,  0.6834, -0.2791])
    >>> torch.pow(a, 2)
    tensor([ 0.1875,  1.5561,  0.4670,  0.0779])
    >>> exp = torch.arange(1., 5.)
    >>> a = torch.arange(1., 5.)
    >>> a
    tensor([ 1.,  2.,  3.,  4.])
    >>> exp
    tensor([ 1.,  2.,  3.,  4.])
    >>> torch.pow(a, exp)
    tensor([   1.,    4.,   27.,  256.])

.. function:: pow(self, exponent, *, out=None) -> Tensor
   :noindex:

:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.

The operation applied is:

.. math::
    \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}

Args:
    self (float): the scalar base value for the power operation
    exponent (Tensor): the exponent tensor

Keyword args:
    {out}

Example::

    >>> exp = torch.arange(1., 5.)
    >>> base = 2
    >>> torch.pow(base, exp)
    tensor([  2.,   4.,   8.,  16.])
""".format(**common_args),
)
add_docstr(
    torch.float_power,
    r"""
float_power(input, exponent, *, out=None) -> Tensor

Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex returns a ``torch.float64`` tensor,
and if one or more inputs is complex returns a ``torch.complex128`` tensor.

.. note::
    This function always computes in double precision, unlike :func:`torch.pow`,
    which implements more typical :ref:`type promotion <type-promotion-doc>`.
    This is useful when the computation needs to be performed in a wider or more precise dtype,
    or the results of the computation may contain fractional values not representable in the input dtypes,
    like when an integer base is raised to a negative integer exponent.

Args:
    input (Tensor or Number): the base value(s)
    exponent (Tensor or Number): the exponent value(s)

Keyword args:
    {out}

Example::

    >>> a = torch.randint(10, (4,))
    >>> a
    tensor([6, 4, 7, 1])
    >>> torch.float_power(a, 2)
    tensor([36., 16., 49.,  1.], dtype=torch.float64)

    >>> a = torch.arange(1, 5)
    >>> a
    tensor([ 1,  2,  3,  4])
    >>> exp = torch.tensor([2, -3, 4, -5])
    >>> exp
    tensor([ 2, -3,  4, -5])
    >>> torch.float_power(a, exp)
    tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
""".format(**common_args),
)
add_docstr(
    torch.prod,
    r"""
prod(input, *, dtype=None) -> Tensor

Returns the product of all elements in the :attr:`input` tensor.

Args:
    {input}

Keyword args:
    {dtype}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[-0.8020,  0.5428, -1.5854]])
    >>> torch.prod(a)
    tensor(0.6902)

.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
   :noindex:

Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {dtype}

Example::

    >>> a = torch.randn(4, 2)
    >>> a
    tensor([[ 0.5261, -0.3837],
            [ 1.1857, -0.2498],
            [-1.1646,  0.0705],
            [ 1.1131, -1.0629]])
    >>> torch.prod(a, 1)
    tensor([-0.2018, -0.2962, -0.0821, -1.1831])
""".format(**single_dim_common),
)
add_docstr(
    torch.promote_types,
    r"""
promote_types(type1, type2) -> dtype

Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
not smaller nor of lower kind than either `type1` or `type2`. See type promotion
:ref:`documentation <type-promotion-doc>` for more information on the type
promotion logic.

Args:
    type1 (:class:`torch.dtype`)
    type2 (:class:`torch.dtype`)

Example::

    >>> torch.promote_types(torch.int32, torch.float32)
    torch.float32
    >>> torch.promote_types(torch.uint8, torch.long)
    torch.long
""",
)
add_docstr(
    torch.qr,
    r"""
qr(input, some=True, *, out=None) -> (Tensor, Tensor)

Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
:math:`R` being an upper triangular matrix or batch of upper triangular matrices.

If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.

.. warning::

    :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
    and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
    replaced with a string parameter :attr:`mode`.

    ``Q, R = torch.qr(A)`` should be replaced with

    .. code:: python

        Q, R = torch.linalg.qr(A)

    ``Q, R = torch.qr(A, some=False)`` should be replaced with

    .. code:: python

        Q, R = torch.linalg.qr(A, mode="complete")

.. warning::
    If you plan to backpropagate through QR, note that the current backward implementation
    is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
    columns of :attr:`input` are linearly independent.
    This behavior will probably change once QR supports pivoting.

.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
          and may produce different (valid) decompositions on different device types
          or different platforms.

Args:
    input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
        batch dimensions consisting of matrices of dimension :math:`m \times n`.
    some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
        complete QR decomposition. If `k = min(m, n)` then:

        * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default)
        * ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)

Keyword args:
    out (tuple, optional): tuple of `Q` and `R` tensors.
        The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.

Example::

    >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
    >>> q, r = torch.qr(a)
    >>> q
    tensor([[-0.8571,  0.3943,  0.3314],
            [-0.4286, -0.9029, -0.0343],
            [ 0.2857, -0.1714,  0.9429]])
    >>> r
    tensor([[ -14.0000,  -21.0000,   14.0000],
            [   0.0000, -175.0000,   70.0000],
            [   0.0000,    0.0000,  -35.0000]])
    >>> torch.mm(q, r).round()
    tensor([[  12.,  -51.,    4.],
            [   6.,  167.,  -68.],
            [  -4.,   24.,  -41.]])
    >>> torch.mm(q.t(), q).round()
    tensor([[ 1.,  0.,  0.],
            [ 0.,  1., -0.],
            [ 0., -0.,  1.]])
    >>> a = torch.randn(3, 4, 5)
    >>> q, r = torch.qr(a, some=False)
    >>> torch.allclose(torch.matmul(q, r), a)
    True
    >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5))
    True
""",
)
add_docstr(
    torch.rad2deg,
    r"""
rad2deg(input, *, out=None) -> Tensor

Returns a new tensor with each of the elements of :attr:`input`
converted from angles in radians to degrees.

Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
    >>> torch.rad2deg(a)
    tensor([[ 180.0233, -180.0233],
            [ 359.9894, -359.9894],
            [  89.9544,  -89.9544]])
""".format(**common_args),
)
add_docstr(
    torch.deg2rad,
    r"""
deg2rad(input, *, out=None) -> Tensor

Returns a new tensor with each of the elements of :attr:`input`
converted from angles in degrees to radians.

Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
    >>> torch.deg2rad(a)
    tensor([[ 3.1416, -3.1416],
            [ 6.2832, -6.2832],
            [ 1.5708, -1.5708]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.heaviside,
    r"""
heaviside(input, values, *, out=None) -> Tensor

Computes the Heaviside step function for each element in :attr:`input`.
The Heaviside step function is defined as:

.. math::
    \text{{heaviside}}(input, values) = \begin{cases}
        0, & \text{if input < 0}\\
        values, & \text{if input == 0}\\
        1, & \text{if input > 0}
    \end{cases}
"""
    + r"""

Args:
    {input}
    values (Tensor): The values to use where :attr:`input` is zero.

Keyword arguments:
    {out}

Example::

    >>> input = torch.tensor([-1.5, 0, 2.0])
    >>> values = torch.tensor([0.5])
    >>> torch.heaviside(input, values)
    tensor([0.0000, 0.5000, 1.0000])
    >>> values = torch.tensor([1.2, -2.0, 3.5])
    >>> torch.heaviside(input, values)
    tensor([0., -2., 1.])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.rand,
    """
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \
requires_grad=False, pin_memory=False) -> Tensor
"""
    + r"""
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`.

The shape of the tensor is defined by the variable argument :attr:`size`.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword args:
    {generator}
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Example::

    >>> torch.rand(4)
    tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
    >>> torch.rand(2, 3)
    tensor([[ 0.8237,  0.5781,  0.6879],
            [ 0.3816,  0.7249,  0.0998]])
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.rand_like,
    r"""
rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a uniform distribution on the interval :math:`[0, 1)`.
``torch.rand_like(input)`` is equivalent to
``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

Args:
    {input}

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}
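
A minimal usage sketch; since the output is random, only deterministic
properties (shape and dtype) are shown::

    >>> x = torch.empty(2, 3, dtype=torch.float64)
    >>> r = torch.rand_like(x)
    >>> r.shape, r.dtype
    (torch.Size([2, 3]), torch.float64)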
  7136. """.format(
  7137. **factory_like_common_args
  7138. ),
  7139. )
add_docstr(
    torch.randint,
    """
randint(low=0, high, size, \\*, generator=None, out=None, \
dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).

The shape of the tensor is defined by the variable argument :attr:`size`.

.. note::
    With the global dtype default (``torch.float32``), this function returns
    a tensor with dtype ``torch.int64``.

Args:
    low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
    high (int): One above the highest integer to be drawn from the distribution.
    size (tuple): a tuple defining the shape of the output tensor.

Keyword args:
    {generator}
    {out}
    dtype (`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
        this function returns a tensor with dtype ``torch.int64``.
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.randint(3, 5, (3,))
    tensor([4, 3, 4])
    >>> torch.randint(10, (2, 2))
    tensor([[0, 2],
            [5, 5]])
    >>> torch.randint(3, 10, (2, 2))
    tensor([[4, 5],
            [6, 7]])
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.randint_like,
    """
randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor

Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).

.. note::
    With the global dtype default (``torch.float32``), this function returns
    a tensor with dtype ``torch.int64``.

Args:
    {input}
    low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
    high (int): One above the highest integer to be drawn from the distribution.

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}
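
A minimal usage sketch; since the values are random, only deterministic
properties (dtype and range) are shown::

    >>> x = torch.zeros(2, 2, dtype=torch.int32)
    >>> r = torch.randint_like(x, 3, 10)
    >>> r.dtype, bool(((r >= 3) & (r < 10)).all())
    (torch.int32, True)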
  7197. """.format(
  7198. **factory_like_common_args
  7199. ),
  7200. )
add_docstr(
    torch.randn,
    """
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \
requires_grad=False, pin_memory=False) -> Tensor
"""
    + r"""
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).

.. math::
    \text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)

The shape of the tensor is defined by the variable argument :attr:`size`.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword args:
    {generator}
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Example::

    >>> torch.randn(4)
    tensor([-2.1436,  0.9966,  2.3426, -0.6366])
    >>> torch.randn(2, 3)
    tensor([[ 1.5954,  2.8929, -1.0923],
            [ 1.1719, -0.4709, -0.1996]])
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.randn_like,
    r"""
randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a normal distribution with mean 0 and variance 1.
``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

Args:
    {input}

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}
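
A minimal usage sketch; since the values are random, only deterministic
properties are shown::

    >>> x = torch.empty(2, 3)
    >>> r = torch.randn_like(x)
    >>> r.shape == x.shape and r.dtype == x.dtype
    True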
  7251. """.format(
  7252. **factory_like_common_args
  7253. ),
  7254. )
add_docstr(
    torch.randperm,
    """
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
device=None, requires_grad=False, pin_memory=False) -> Tensor
"""
    + r"""
Returns a random permutation of integers from ``0`` to ``n - 1``.

Args:
    n (int): the upper bound (exclusive)

Keyword args:
    {generator}
    {out}
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: ``torch.int64``.
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Example::

    >>> torch.randperm(4)
    tensor([2, 1, 0, 3])
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.tensor,
    r"""
tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor

Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.

.. warning::

    When working with tensors prefer using :func:`torch.Tensor.clone`,
    :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
    readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
    ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
    is equivalent to ``t.clone().detach().requires_grad_(True)``.

.. seealso::

    :func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
    :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.

Args:
    {data}

Keyword args:
    {dtype}
    device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
        then the device of data is used. If None and data is not a tensor then
        the result tensor is constructed on the CPU.
    {requires_grad}
    {pin_memory}

Example::

    >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
    tensor([[ 0.1000,  1.2000],
            [ 2.2000,  3.1000],
            [ 4.9000,  5.2000]])

    >>> torch.tensor([0, 1])  # Type inference on data
    tensor([ 0,  1])

    >>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
    ...              dtype=torch.float64,
    ...              device=torch.device('cuda:0'))  # creates a double tensor on a CUDA device
    tensor([[ 0.1111,  0.2222,  0.3333]], dtype=torch.float64, device='cuda:0')

    >>> torch.tensor(3.14159)  # Create a zero-dimensional (scalar) tensor
    tensor(3.1416)

    >>> torch.tensor([])  # Create an empty tensor (of size (0,))
    tensor([])
""".format(
        **factory_data_common_args
    ),
)
add_docstr(
    torch.range,
    r"""
range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.

.. math::
    \text{out}_{i+1} = \text{out}_i + \text{step}.
"""
    + r"""
.. warning::
    This function is deprecated and will be removed in a future release because its behavior is inconsistent with
    Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).

Args:
    start (float): the starting value for the set of points. Default: ``0``.
    end (float): the ending value for the set of points
    step (float): the gap between each pair of adjacent points. Default: ``1``.

Keyword args:
    {out}
    {dtype} If `dtype` is not given, infer the data type from the other input
        arguments. If any of `start`, `end`, or `step` are floating-point, the
        `dtype` is inferred to be the default dtype, see
        :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
        be `torch.int64`.
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.range(1, 4)
    tensor([ 1.,  2.,  3.,  4.])
    >>> torch.range(1, 4, 0.5)
    tensor([ 1.0000,  1.5000,  2.0000,  2.5000,  3.0000,  3.5000,  4.0000])
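
A quick contrast with the replacement (illustrative)::

    >>> torch.arange(1, 4)  # excludes ``end``, matching Python's range builtin
    tensor([1, 2, 3])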
  7356. """.format(
  7357. **factory_common_args
  7358. ),
  7359. )
add_docstr(
    torch.arange,
    r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.

Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise adding a small epsilon to :attr:`end`
in such cases.

.. math::
    \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
"""
    + r"""
Args:
    start (Number): the starting value for the set of points. Default: ``0``.
    end (Number): the ending value for the set of points
    step (Number): the gap between each pair of adjacent points. Default: ``1``.

Keyword args:
    {out}
    {dtype} If `dtype` is not given, infer the data type from the other input
        arguments. If any of `start`, `end`, or `step` are floating-point, the
        `dtype` is inferred to be the default dtype, see
        :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
        be `torch.int64`.
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.arange(5)
    tensor([ 0,  1,  2,  3,  4])
    >>> torch.arange(1, 4)
    tensor([ 1,  2,  3])
    >>> torch.arange(1, 2.5, 0.5)
    tensor([ 1.0000,  1.5000,  2.0000])
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.ravel,
    r"""
ravel(input) -> Tensor

Return a contiguous flattened tensor. A copy is made only if needed.

Args:
    {input}

Example::

    >>> t = torch.tensor([[[1, 2],
    ...                    [3, 4]],
    ...                   [[5, 6],
    ...                    [7, 8]]])
    >>> torch.ravel(t)
    tensor([1, 2, 3, 4, 5, 6, 7, 8])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.remainder,
    r"""
remainder(input, other, *, out=None) -> Tensor

Computes
`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
is less than that of :attr:`other`.

It may also be defined in terms of :func:`torch.div` as

.. code:: python

    torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

.. note::
    Complex inputs are not supported. In some cases, it is not mathematically
    possible to satisfy the definition of a modulo operation with complex numbers.
    See :func:`torch.fmod` for how division by zero is handled.

.. seealso::

    :func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
    This one is defined in terms of division rounding towards zero.

Args:
    input (Tensor or Scalar): the dividend
    other (Tensor or Scalar): the divisor

Keyword args:
    {out}

Example::

    >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
    tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
    >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
    tensor([-0.5000, -1.0000,  0.0000, -0.5000, -1.0000])
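
A quick check of the :func:`torch.div` identity above (illustrative sketch)::

    >>> a, b = torch.tensor([-3., 2.]), torch.tensor([2., -1.5])
    >>> torch.equal(torch.remainder(a, b), a - a.div(b, rounding_mode="floor") * b)
    True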
  7447. """.format(
  7448. **common_args
  7449. ),
  7450. )
add_docstr(
    torch.renorm,
    r"""
renorm(input, p, dim, maxnorm, *, out=None) -> Tensor

Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`.

.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged

Args:
    {input}
    p (float): the power for the norm computation
    dim (int): the dimension to slice over to get the sub-tensors
    maxnorm (float): the maximum norm to keep each sub-tensor under

Keyword args:
    {out}

Example::

    >>> x = torch.ones(3, 3)
    >>> x[1].fill_(2)
    tensor([ 2.,  2.,  2.])
    >>> x[2].fill_(3)
    tensor([ 3.,  3.,  3.])
    >>> x
    tensor([[ 1.,  1.,  1.],
            [ 2.,  2.,  2.],
            [ 3.,  3.,  3.]])
    >>> torch.renorm(x, 1, 0, 5)
    tensor([[ 1.0000,  1.0000,  1.0000],
            [ 1.6667,  1.6667,  1.6667],
            [ 1.6667,  1.6667,  1.6667]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.reshape,
    r"""
reshape(input, shape) -> Tensor

Returns a tensor with the same data and number of elements as :attr:`input`,
but with the specified shape. When possible, the returned tensor will be a view
of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.

See :meth:`torch.Tensor.view` on when it is possible to return a view.

A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`input`.

Args:
    input (Tensor): the tensor to be reshaped
    shape (tuple of int): the new shape

Example::

    >>> a = torch.arange(4.)
    >>> torch.reshape(a, (2, 2))
    tensor([[ 0.,  1.],
            [ 2.,  3.]])
    >>> b = torch.tensor([[0, 1], [2, 3]])
    >>> torch.reshape(b, (-1,))
    tensor([ 0,  1,  2,  3])
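
A sketch of the view behavior for a contiguous input (per the note above, do
not rely on view vs. copy semantics in general)::

    >>> base = torch.zeros(4)
    >>> v = torch.reshape(base, (2, 2))
    >>> v[0, 0] = 1.0  # visible through ``base`` because ``v`` is a view here
    >>> base
    tensor([1., 0., 0., 0.])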
  7507. """,
  7508. )
add_docstr(
    torch.result_type,
    r"""
result_type(tensor1, tensor2) -> dtype

Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.

Args:
    tensor1 (Tensor or Number): an input tensor or number
    tensor2 (Tensor or Number): an input tensor or number

Example::

    >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
    torch.float32
    >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
    torch.uint8
""",
)
add_docstr(
    torch.row_stack,
    r"""
row_stack(tensors, *, out=None) -> Tensor

Alias of :func:`torch.vstack`.
""",
)
add_docstr(
    torch.round,
    r"""
round(input, *, decimals=0, out=None) -> Tensor

Rounds elements of :attr:`input` to the nearest integer.

For integer inputs, follows the array-api convention of returning a
copy of the input tensor.

.. note::
    This function implements the "round half to even" rule to
    break ties when a number is equidistant from two
    integers (e.g. `round(2.5)` is 2).

    When the :attr:`decimals` argument is specified the
    algorithm used is similar to NumPy's `around`. This
    algorithm is fast but inexact and it can easily
    overflow for low precision dtypes.
    E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.

.. seealso::
    :func:`torch.ceil`, which rounds up.
    :func:`torch.floor`, which rounds down.
    :func:`torch.trunc`, which rounds towards zero.

Args:
    {input}
    decimals (int): Number of decimal places to round to (default: 0).
        If decimals is negative, it specifies the number of positions
        to the left of the decimal point.

Keyword args:
    {out}

Example::

    >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
    tensor([ 5., -2.,  9., -8.])

    >>> # Values equidistant from two integers are rounded towards the
    >>> # nearest even value (zero is treated as even)
    >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
    tensor([-0., 0., 2., 2.])

    >>> # A positive decimals argument rounds to the nth decimal place
    >>> torch.round(torch.tensor([0.1234567]), decimals=3)
    tensor([0.1230])

    >>> # A negative decimals argument rounds to the left of the decimal
    >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
    tensor([1000.])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.rsqrt,
    r"""
rsqrt(input, *, out=None) -> Tensor

Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.0370,  0.2970,  1.5420, -0.9105])
    >>> torch.rsqrt(a)
    tensor([    nan,  1.8351,  0.8053,     nan])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.scatter,
    r"""
scatter(input, dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_`
""",
)
add_docstr(
    torch.scatter_add,
    r"""
scatter_add(input, dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_add_`
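
A minimal illustrative sketch (see :meth:`torch.Tensor.scatter_add_` for the
full semantics); note that :attr:`input` itself is left unchanged::

    >>> input = torch.zeros(3)
    >>> index = torch.tensor([0, 0, 2])
    >>> src = torch.tensor([1., 2., 3.])
    >>> torch.scatter_add(input, 0, index, src)
    tensor([3., 0., 3.])
    >>> input
    tensor([0., 0., 0.])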
  7613. """,
  7614. )
add_docstr(
    torch.scatter_reduce,
    r"""
scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
""",
)
add_docstr(
    torch.select,
    r"""
select(input, dim, index) -> Tensor

Slices the :attr:`input` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.

.. note:: If :attr:`input` is a sparse tensor and returning a view of
          the tensor is not possible, a RuntimeError exception is
          raised. In this case, consider using the
          :func:`torch.select_copy` function.

Args:
    {input}
    dim (int): the dimension to slice
    index (int): the index to select with

.. note::

    :meth:`select` is equivalent to slicing. For example,
    ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
    ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
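
Example (a sketch of the equivalence noted above)::

    >>> t = torch.arange(6).reshape(2, 3)
    >>> torch.select(t, 0, 1)
    tensor([3, 4, 5])
    >>> torch.equal(torch.select(t, 0, 1), t[1])
    True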
  7640. """.format(
  7641. **common_args
  7642. ),
  7643. )
add_docstr(
    torch.select_scatter,
    r"""
select_scatter(input, src, dim, index) -> Tensor

Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
This function returns a tensor with fresh storage; it does not create a view.

Args:
    {input}
    src (Tensor): The tensor to embed into :attr:`input`
    dim (int): the dimension to insert the slice into.
    index (int): the index to select with

.. note::

    :attr:`src` must be of the proper size in order to be embedded
    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.select(input, dim, index)``

Example::

    >>> a = torch.zeros(2, 2)
    >>> b = torch.ones(2)
    >>> a.select_scatter(b, 0, 0)
    tensor([[1., 1.],
            [0., 0.]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.slice_scatter,
    r"""
slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor

Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
dimension.
This function returns a tensor with fresh storage; it does not create a view.

Args:
    {input}
    src (Tensor): The tensor to embed into :attr:`input`
    dim (int): the dimension to insert the slice into
    start (Optional[int]): the start index of where to insert the slice
    end (Optional[int]): the end index of where to insert the slice
    step (int): how many elements to skip in between each inserted element

Example::

    >>> a = torch.zeros(8, 8)
    >>> b = torch.ones(2, 8)
    >>> a.slice_scatter(b, start=6)
    tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0., 0., 0.],
            [1., 1., 1., 1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1., 1., 1., 1.]])

    >>> b = torch.ones(8, 2)
    >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
    tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 1., 0., 0., 0.]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.set_flush_denormal,
    r"""
set_flush_denormal(mode) -> bool

Disables denormal floating numbers on CPU.

Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is only supported on x86 architectures supporting SSE3.

Args:
    mode (bool): Controls whether to enable flush denormal mode or not

Example::

    >>> torch.set_flush_denormal(True)
    True
    >>> torch.tensor([1e-323], dtype=torch.float64)
    tensor([ 0.], dtype=torch.float64)
    >>> torch.set_flush_denormal(False)
    True
    >>> torch.tensor([1e-323], dtype=torch.float64)
    tensor(9.88131e-324 *
           [ 1.0000], dtype=torch.float64)
""",
)
add_docstr(
    torch.set_num_threads,
    r"""
set_num_threads(int)

Sets the number of threads used for intraop parallelism on CPU.

.. warning::
    To ensure that the correct number of threads is used, set_num_threads
    must be called before running eager, JIT or autograd code.
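
A minimal usage sketch (illustrative; the right thread count is workload- and
machine-dependent)::

    >>> torch.set_num_threads(4)
    >>> torch.get_num_threads()
    4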
  7739. """,
  7740. )
add_docstr(
    torch.set_num_interop_threads,
    r"""
set_num_interop_threads(int)

Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.

.. warning::
    Can only be called once and before any inter-op parallel work
    is started (e.g. JIT execution).
""",
)
add_docstr(
    torch.sigmoid,
    r"""
sigmoid(input, *, out=None) -> Tensor

Alias for :func:`torch.special.expit`.
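
A minimal check (illustrative)::

    >>> torch.sigmoid(torch.tensor([0.0]))
    tensor([0.5000])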
  7757. """,
  7758. )
add_docstr(
    torch.logit,
    r"""
logit(input, eps=None, *, out=None) -> Tensor

Alias for :func:`torch.special.logit`.
""",
)
add_docstr(
    torch.sign,
    r"""
sign(input, *, out=None) -> Tensor

Returns a new tensor with the signs of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
    >>> a
    tensor([ 0.7000, -1.2000,  0.0000,  2.3000])
    >>> torch.sign(a)
    tensor([ 1., -1.,  0.,  1.])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.signbit,
    r"""
signbit(input, *, out=None) -> Tensor

Tests if each element of :attr:`input` has its sign bit set or not.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
    >>> torch.signbit(a)
    tensor([False, True, False, False])
    >>> a = torch.tensor([-0.0, 0.0])
    >>> torch.signbit(a)
    tensor([ True, False])

.. note::
    signbit handles signed zeros, so negative zero (-0) returns True.
""".format(
        **common_args
    ),
)
add_docstr(
    torch.sgn,
    r"""
sgn(input, *, out=None) -> Tensor

This function is an extension of torch.sign() to complex tensors.
It computes a new tensor whose elements have
the same angles as the corresponding elements of :attr:`input` and
absolute values (i.e. magnitudes) of one for complex tensors and
is equivalent to torch.sign() for non-complex tensors.

.. math::
    \text{out}_{i} = \begin{cases}
                    0 & |\text{{input}}_i| == 0 \\
                    \frac{{\text{{input}}_i}}{|{\text{{input}}_i}|} & \text{otherwise}
                    \end{cases}
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
    >>> t.sgn()
    tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.sin,
    r"""
sin(input, *, out=None) -> Tensor

Returns a new tensor with the sine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sin(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.5461,  0.1347, -2.7266, -0.2746])
    >>> torch.sin(a)
    tensor([-0.5194,  0.1343, -0.4032, -0.2711])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.sinc,
    r"""
sinc(input, *, out=None) -> Tensor

Alias for :func:`torch.special.sinc`.
""",
)
add_docstr(
    torch.sinh,
    r"""
sinh(input, *, out=None) -> Tensor

Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.

.. math::
    \text{out}_{i} = \sinh(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.5380, -0.8632, -0.1265,  0.9399])
    >>> torch.sinh(a)
    tensor([ 0.5644, -0.9744, -0.1268,  1.0845])

.. note::
   When :attr:`input` is on the CPU, the implementation of torch.sinh may use
   the Sleef library, which rounds very large results to infinity or negative
   infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(
        **common_args
    ),
)
add_docstr(
    torch.sort,
    r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)

Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.

If :attr:`dim` is not given, the last dimension of the `input` is chosen.

If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.

If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.

A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.

Args:
    {input}
    dim (int, optional): the dimension to sort along
    descending (bool, optional): controls the sorting order (ascending or descending)
    stable (bool, optional): makes the sorting routine stable, which guarantees that the order
        of equivalent elements is preserved.

Keyword args:
    out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
        be optionally given to be used as output buffers

Example::

    >>> x = torch.randn(3, 4)
    >>> sorted, indices = torch.sort(x)
    >>> sorted
    tensor([[-0.2162,  0.0608,  0.6719,  2.3332],
            [-0.5793,  0.0061,  0.6058,  0.9497],
            [-0.5071,  0.3343,  0.9553,  1.0960]])
    >>> indices
    tensor([[ 1,  0,  2,  3],
            [ 3,  1,  0,  2],
            [ 0,  3,  1,  2]])

    >>> sorted, indices = torch.sort(x, 0)
    >>> sorted
    tensor([[-0.5071, -0.2162,  0.6719, -0.5793],
            [ 0.0608,  0.0061,  0.9497,  0.3343],
            [ 0.6058,  0.9553,  1.0960,  2.3332]])
    >>> indices
    tensor([[ 2,  0,  0,  1],
            [ 0,  1,  1,  2],
            [ 1,  2,  2,  0]])
    >>> x = torch.tensor([0, 1] * 9)
    >>> x.sort()
    torch.return_types.sort(
        values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
        indices=tensor([ 2, 16,  4,  6, 14,  8,  0, 10, 12,  9, 17, 15, 13, 11,  7,  5,  3,  1]))
    >>> x.sort(stable=True)
    torch.return_types.sort(
        values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
        indices=tensor([ 0,  2,  4,  6,  8, 10, 12, 14, 16,  1,  3,  5,  7,  9, 11, 13, 15, 17]))
""".format(
        **common_args
    ),
)
add_docstr(
    torch.argsort,
    r"""
argsort(input, dim=-1, descending=False, stable=False) -> Tensor

Returns the indices that sort a tensor along a given dimension in ascending
order by value.

This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.

If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements. If ``False``, the relative order of values
which compare equal is not guaranteed. ``True`` is slower.

Args:
    {input}
    dim (int, optional): the dimension to sort along
    descending (bool, optional): controls the sorting order (ascending or descending)
    stable (bool, optional): controls the relative order of equivalent elements

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.0785,  1.5267, -0.8521,  0.4065],
            [ 0.1598,  0.0788, -0.0745, -1.2700],
            [ 1.2208,  1.0722, -0.7064,  1.2564],
            [ 0.0669, -0.2318, -0.8229, -0.9280]])

    >>> torch.argsort(a, dim=1)
    tensor([[2, 0, 3, 1],
            [3, 2, 1, 0],
            [2, 1, 0, 3],
            [3, 2, 1, 0]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.msort,
    r"""
msort(input, *, out=None) -> Tensor

Sorts the elements of the :attr:`input` tensor along its first dimension
in ascending order by value.

.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
          See also :func:`torch.sort`.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> t = torch.randn(3, 4)
    >>> t
    tensor([[-0.1321,  0.4370, -1.2631, -1.1289],
            [-2.0527, -1.1250,  0.2275,  0.3077],
            [-0.0881, -0.1259, -0.5495,  1.0284]])
    >>> torch.msort(t)
    tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
            [-0.1321, -0.1259, -0.5495,  0.3077],
            [-0.0881,  0.4370,  0.2275,  1.0284]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.sparse_compressed_tensor,
    r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """
    r"""*, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor

Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
matrix multiplication operations in Compressed Sparse format are
typically faster than that for sparse tensors in COO format. Make sure
you have a look at :ref:`the note on the data type of the indices
<sparse-compressed-docs>`.

{sparse_factory_device_note}

Args:
    compressed_indices (array_like): (B+1)-dimensional array of size
        ``(*batchsize, compressed_dim_size + 1)``. The last element of
        each batch is the number of non-zero elements or blocks. This
        tensor encodes the index in ``values`` and ``plain_indices``
        depending on where the given compressed dimension (row or
        column) starts. Each successive number in the tensor
        subtracted by the number before it denotes the number of
        elements or blocks in a given compressed dimension.
    plain_indices (array_like): Plain dimension (column or row)
        co-ordinates of each element or block in values. (B+1)-dimensional
        tensor with the same length as values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, and other types that
        represent a (1+K)-dimensional (for CSR and CSC layouts) or
        (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
        ``K`` is the number of dense dimensions.
    size (list, tuple, :class:`torch.Size`, optional): Size of the
        sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
        blocksize[1], *densesize)`` where ``blocksize[0] ==
        blocksize[1] == 1`` for CSR and CSC formats. If not provided,
        the size will be inferred as the minimum size big enough to
        hold all non-zero elements or blocks.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of
        returned tensor. Default: if None, infers data type from
        :attr:`values`.
    layout (:class:`torch.layout`, required): the desired layout of
        returned tensor: :attr:`torch.sparse_csr`,
        :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
        :attr:`torch.sparse_bsc`.
    device (:class:`torch.device`, optional): the desired device of
        returned tensor. Default: if None, uses the current device
        for the default tensor type (see
        :func:`torch.set_default_tensor_type`). :attr:`device` will be
        the CPU for CPU tensor types and the current CUDA device for
        CUDA tensor types.
    {requires_grad}
    {check_invariants}

Example::

    >>> compressed_indices = [0, 2, 4]
    >>> plain_indices = [0, 1, 0, 1]
    >>> values = [1, 2, 3, 4]
    >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
    ...                                torch.tensor(plain_indices, dtype=torch.int64),
    ...                                torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
    tensor(crow_indices=tensor([0, 2, 4]),
           col_indices=tensor([0, 1, 0, 1]),
           values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
           dtype=torch.float64, layout=torch.sparse_csr)
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.sparse_csr_tensor,
    r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """
    r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor

Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
in CSR format are typically faster than that for sparse tensors in COO format. Make sure you have a look
at :ref:`the note on the data type of the indices <sparse-csr-docs>`.

{sparse_factory_device_note}

Args:
    crow_indices (array_like): (B+1)-dimensional array of size
        ``(*batchsize, nrows + 1)``. The last element of each batch
        is the number of non-zeros. This tensor encodes the index in
        values and col_indices depending on where the given row
        starts. Each successive number in the tensor subtracted by the
        number before it denotes the number of elements in a given
        row.
    col_indices (array_like): Column co-ordinates of each element in
        values. (B+1)-dimensional tensor with the same length
        as values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, and other types that
        represent a (1+K)-dimensional tensor where ``K`` is the number
        of dense dimensions.
    size (list, tuple, :class:`torch.Size`, optional): Size of the
        sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
        not provided, the size will be inferred as the minimum size
        big enough to hold all non-zero elements.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of
        returned tensor. Default: if None, infers data type from
        :attr:`values`.
    device (:class:`torch.device`, optional): the desired device of
        returned tensor. Default: if None, uses the current device
        for the default tensor type (see
        :func:`torch.set_default_tensor_type`). :attr:`device` will be
        the CPU for CPU tensor types and the current CUDA device for
        CUDA tensor types.
    {requires_grad}
    {check_invariants}

Example::

    >>> crow_indices = [0, 2, 4]
    >>> col_indices = [0, 1, 0, 1]
    >>> values = [1, 2, 3, 4]
    >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
    ...                         torch.tensor(col_indices, dtype=torch.int64),
    ...                         torch.tensor(values), dtype=torch.double)
    tensor(crow_indices=tensor([0, 2, 4]),
           col_indices=tensor([0, 1, 0, 1]),
           values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
           dtype=torch.float64, layout=torch.sparse_csr)
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.sparse_csc_tensor,
    r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """
    r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor

Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
<sparse-csc-docs>` with specified values at the given
:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in CSC format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-csc-docs>`.

{sparse_factory_device_note}

Args:
    ccol_indices (array_like): (B+1)-dimensional array of size
        ``(*batchsize, ncols + 1)``. The last element of each batch
        is the number of non-zeros. This tensor encodes the index in
        values and row_indices depending on where the given column
        starts. Each successive number in the tensor subtracted by the
        number before it denotes the number of elements in a given
        column.
    row_indices (array_like): Row co-ordinates of each element in
        values. (B+1)-dimensional tensor with the same length as
        values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, and other types that
        represent a (1+K)-dimensional tensor where ``K`` is the number
        of dense dimensions.
    size (list, tuple, :class:`torch.Size`, optional): Size of the
        sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
        not provided, the size will be inferred as the minimum size
        big enough to hold all non-zero elements.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of
        returned tensor. Default: if None, infers data type from
        :attr:`values`.
    device (:class:`torch.device`, optional): the desired device of
        returned tensor. Default: if None, uses the current device
        for the default tensor type (see
        :func:`torch.set_default_tensor_type`). :attr:`device` will be
        the CPU for CPU tensor types and the current CUDA device for
        CUDA tensor types.
    {requires_grad}
    {check_invariants}

Example::

    >>> ccol_indices = [0, 2, 4]
    >>> row_indices = [0, 1, 0, 1]
    >>> values = [1, 2, 3, 4]
    >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
    ...                         torch.tensor(row_indices, dtype=torch.int64),
    ...                         torch.tensor(values), dtype=torch.double)
    tensor(ccol_indices=tensor([0, 2, 4]),
           row_indices=tensor([0, 1, 0, 1]),
           values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
           dtype=torch.float64, layout=torch.sparse_csc)
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.sparse_bsr_tensor,
    r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """
    r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor

Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
<sparse-bsr-docs>` with specified 2-dimensional blocks at the given
:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
multiplication operations in BSR format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsr-docs>`.

{sparse_factory_device_note}

Args:
    crow_indices (array_like): (B+1)-dimensional array of size
        ``(*batchsize, nrowblocks + 1)``. The last element of each
        batch is the number of non-zeros. This tensor encodes the
        block index in values and col_indices depending on where the
        given row block starts. Each successive number in the tensor
        subtracted by the number before it denotes the number of
        blocks in a given row.
    col_indices (array_like): Column block co-ordinates of each block
        in values. (B+1)-dimensional tensor with the same length as
        values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, and other types that
        represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
        number of dense dimensions.
    size (list, tuple, :class:`torch.Size`, optional): Size of the
        sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
        blocksize[1], *densesize)`` where ``blocksize ==
        values.shape[1:3]``. If not provided, the size will be
        inferred as the minimum size big enough to hold all non-zero
        blocks.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of
        returned tensor. Default: if None, infers data type from
        :attr:`values`.
    device (:class:`torch.device`, optional): the desired device of
        returned tensor. Default: if None, uses the current device
        for the default tensor type (see
        :func:`torch.set_default_tensor_type`). :attr:`device` will be
        the CPU for CPU tensor types and the current CUDA device for
        CUDA tensor types.
    {requires_grad}
    {check_invariants}

Example::

    >>> crow_indices = [0, 1, 2]
    >>> col_indices = [0, 1]
    >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
    >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
    ...                         torch.tensor(col_indices, dtype=torch.int64),
    ...                         torch.tensor(values), dtype=torch.double)
    tensor(crow_indices=tensor([0, 1, 2]),
           col_indices=tensor([0, 1]),
           values=tensor([[[1., 2.],
                           [3., 4.]],
                          [[5., 6.],
                           [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
           layout=torch.sparse_bsr)
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.sparse_bsc_tensor,
    r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """
    r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor

Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in BSC format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsc-docs>`.

{sparse_factory_device_note}

Args:
    ccol_indices (array_like): (B+1)-dimensional array of size
        ``(*batchsize, ncolblocks + 1)``. The last element of each
        batch is the number of non-zeros. This tensor encodes the
        index in values and row_indices depending on where the given
        column starts. Each successive number in the tensor subtracted
        by the number before it denotes the number of elements in a
        given column.
    row_indices (array_like): Row block co-ordinates of each block in
        values. (B+1)-dimensional tensor with the same length
        as values.
    values (array_like): Initial blocks for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, and other types that
        represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
        number of dense dimensions.
    size (list, tuple, :class:`torch.Size`, optional): Size of the
        sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
        blocksize[1], *densesize)`` If not provided, the size will be
        inferred as the minimum size big enough to hold all non-zero
        blocks.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of
        returned tensor. Default: if None, infers data type from
        :attr:`values`.
    device (:class:`torch.device`, optional): the desired device of
        returned tensor. Default: if None, uses the current device
        for the default tensor type (see
        :func:`torch.set_default_tensor_type`). :attr:`device` will be
        the CPU for CPU tensor types and the current CUDA device for
        CUDA tensor types.
    {requires_grad}
    {check_invariants}

Example::

    >>> ccol_indices = [0, 1, 2]
    >>> row_indices = [0, 1]
    >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
    >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
    ...                         torch.tensor(row_indices, dtype=torch.int64),
    ...                         torch.tensor(values), dtype=torch.double)
    tensor(ccol_indices=tensor([0, 1, 2]),
           row_indices=tensor([0, 1]),
           values=tensor([[[1., 2.],
                           [3., 4.]],
                          [[5., 6.],
                           [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
           layout=torch.sparse_bsc)
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.sparse_coo_tensor,
    r"""
sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor

Constructs a :ref:`sparse tensor in COO(rdinate) format
<sparse-coo-docs>` with specified values at the given
:attr:`indices`.

.. note::

   This function returns an :ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.

{sparse_factory_device_note}

Args:
    indices (array_like): Initial data for the tensor. Can be a list, tuple,
        NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
        internally. The indices are the coordinates of the non-zero values in the matrix, and thus
        should be two-dimensional where the first dimension is the number of tensor dimensions and
        the second dimension is the number of non-zero values.
    values (array_like): Initial values for the tensor. Can be a list, tuple,
        NumPy ``ndarray``, scalar, and other types.
    size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
        provided the size will be inferred as the minimum size big enough to hold all non-zero
        elements.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if None, infers data type from :attr:`values`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, uses the current device for the default tensor type
        (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    {requires_grad}
    {check_invariants}

Example::

    >>> i = torch.tensor([[0, 1, 1],
    ...                   [2, 0, 2]])
    >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
    >>> torch.sparse_coo_tensor(i, v, [2, 4])
    tensor(indices=tensor([[0, 1, 1],
                           [2, 0, 2]]),
           values=tensor([3., 4., 5.]),
           size=(2, 4), nnz=3, layout=torch.sparse_coo)

    >>> torch.sparse_coo_tensor(i, v)  # Shape inference
    tensor(indices=tensor([[0, 1, 1],
                           [2, 0, 2]]),
           values=tensor([3., 4., 5.]),
           size=(2, 3), nnz=3, layout=torch.sparse_coo)

    >>> torch.sparse_coo_tensor(i, v, [2, 4],
    ...                         dtype=torch.float64,
    ...                         device=torch.device('cuda:0'))
    tensor(indices=tensor([[0, 1, 1],
                           [2, 0, 2]]),
           values=tensor([3., 4., 5.]),
           device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
           layout=torch.sparse_coo)

    # Create an empty sparse tensor with the following invariants:
    #   1. sparse_dim + dense_dim = len(SparseTensor.shape)
    #   2. SparseTensor._indices().shape = (sparse_dim, nnz)
    #   3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
    #
    # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
    # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
    >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
    tensor(indices=tensor([], size=(1, 0)),
           values=tensor([], size=(0,)),
           size=(1,), nnz=0, layout=torch.sparse_coo)

    # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
    # sparse_dim = 1
    >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
    tensor(indices=tensor([], size=(1, 0)),
           values=tensor([], size=(0, 2)),
           size=(1, 2), nnz=0, layout=torch.sparse_coo)

.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
""".format(
        **factory_common_args
    ),
)
add_docstr(
    torch.sqrt,
    r"""
sqrt(input, *, out=None) -> Tensor

Returns a new tensor with the square-root of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sqrt{\text{input}_{i}}
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-2.0755,  1.0226,  0.0831,  0.4806])
    >>> torch.sqrt(a)
    tensor([    nan,  1.0112,  0.2883,  0.6933])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.square,
    r"""
square(input, *, out=None) -> Tensor

Returns a new tensor with the square of the elements of :attr:`input`.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-2.0755,  1.0226,  0.0831,  0.4806])
    >>> torch.square(a)
    tensor([ 4.3077,  1.0457,  0.0069,  0.2310])
""".format(
        **common_args
    ),
)
  8428. add_docstr(
  8429. torch.squeeze,
  8430. r"""
  8431. squeeze(input, dim=None) -> Tensor
  8432. Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
  8433. For example, if `input` is of shape:
  8434. :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
  8435. will be of shape: :math:`(A \times B \times C \times D)`.
  8436. When :attr:`dim` is given, a squeeze operation is done only in the given
  8437. dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
  8438. ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
  8439. will squeeze the tensor to the shape :math:`(A \times B)`.
  8440. .. note:: The returned tensor shares the storage with the input tensor,
  8441. so changing the contents of one will change the contents of the other.
  8442. .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
  8443. will also remove the batch dimension, which can lead to unexpected
  8444. errors. Consider specifying only the dims you wish to be squeezed.
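For example::
>>> x = torch.zeros(1, 1, 3)
>>> x.squeeze().shape # the leading batch dimension is removed too
torch.Size([3])
>>> x.squeeze(1).shape # only the intended dimension is removed
torch.Size([1, 3])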
  8445. Args:
  8446. {input}
  8447. dim (int or tuple of ints, optional): if given, the input will be squeezed
  8448. only in the specified dimensions.
  8449. .. versionchanged:: 2.0
  8450. :attr:`dim` now accepts tuples of dimensions.
  8451. Example::
  8452. >>> x = torch.zeros(2, 1, 2, 1, 2)
  8453. >>> x.size()
  8454. torch.Size([2, 1, 2, 1, 2])
  8455. >>> y = torch.squeeze(x)
  8456. >>> y.size()
  8457. torch.Size([2, 2, 2])
  8458. >>> y = torch.squeeze(x, 0)
  8459. >>> y.size()
  8460. torch.Size([2, 1, 2, 1, 2])
  8461. >>> y = torch.squeeze(x, 1)
  8462. >>> y.size()
  8463. torch.Size([2, 2, 1, 2])
>>> y = torch.squeeze(x, (1, 2, 3))
>>> y.size()
torch.Size([2, 2, 2])
  8466. """.format(
  8467. **common_args
  8468. ),
  8469. )
  8470. add_docstr(
  8471. torch.std,
  8472. r"""
  8473. std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
  8474. Calculates the standard deviation over the dimensions specified by :attr:`dim`.
  8475. :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
  8476. reduce over all dimensions.
  8477. The standard deviation (:math:`\sigma`) is calculated as
  8478. .. math:: \sigma = \sqrt{\frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
  8479. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  8480. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  8481. the :attr:`correction`.
  8482. """
  8483. + r"""
  8484. {keepdim_details}
  8485. Args:
  8486. {input}
{opt_dim}
  8488. Keyword args:
  8489. correction (int): difference between the sample size and sample degrees of freedom.
  8490. Defaults to `Bessel's correction`_, ``correction=1``.
  8491. .. versionchanged:: 2.0
  8492. Previously this argument was called ``unbiased`` and was a boolean
  8493. with ``True`` corresponding to ``correction=1`` and ``False`` being
  8494. ``correction=0``.
  8495. {keepdim}
  8496. {out}
  8497. Example:
  8498. >>> a = torch.tensor(
  8499. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  8500. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  8501. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  8502. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  8503. >>> torch.std(a, dim=1, keepdim=True)
  8504. tensor([[1.0311],
  8505. [0.7477],
  8506. [1.2204],
  8507. [0.9087]])
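The effect of :attr:`correction` can be checked directly against the formula
above (``correction=0`` gives the biased, population estimate; the sum of
squared deviations below is 5)::
>>> t = torch.tensor([1., 2., 3., 4.])
>>> torch.std(t) # sqrt(5 / 3)
tensor(1.2910)
>>> torch.std(t, correction=0) # sqrt(5 / 4)
tensor(1.1180)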
  8508. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  8509. """.format(
  8510. **multi_dim_common
  8511. ),
  8512. )
  8513. add_docstr(
  8514. torch.std_mean,
  8515. r"""
  8516. std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
  8517. Calculates the standard deviation and mean over the dimensions specified by
  8518. :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
  8519. ``None`` to reduce over all dimensions.
  8520. The standard deviation (:math:`\sigma`) is calculated as
  8521. .. math:: \sigma = \sqrt{\frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
  8522. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  8523. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  8524. the :attr:`correction`.
  8525. """
  8526. + r"""
  8527. {keepdim_details}
  8528. Args:
  8529. {input}
  8530. {opt_dim}
  8531. Keyword args:
  8532. correction (int): difference between the sample size and sample degrees of freedom.
  8533. Defaults to `Bessel's correction`_, ``correction=1``.
  8534. .. versionchanged:: 2.0
  8535. Previously this argument was called ``unbiased`` and was a boolean
  8536. with ``True`` corresponding to ``correction=1`` and ``False`` being
  8537. ``correction=0``.
  8538. {keepdim}
  8539. {out}
  8540. Returns:
  8541. A tuple (std, mean) containing the standard deviation and mean.
  8542. Example:
  8543. >>> a = torch.tensor(
  8544. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  8545. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  8546. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  8547. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  8548. >>> torch.std_mean(a, dim=0, keepdim=True)
  8549. (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
  8550. tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
  8551. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  8552. """.format(
  8553. **multi_dim_common
  8554. ),
  8555. )
  8556. add_docstr(
  8557. torch.sub,
  8558. r"""
  8559. sub(input, other, *, alpha=1, out=None) -> Tensor
  8560. Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
  8561. .. math::
  8562. \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
  8563. """
  8564. + r"""
  8565. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
  8566. :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
  8567. Args:
  8568. {input}
  8569. other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
  8570. Keyword args:
  8571. alpha (Number): the multiplier for :attr:`other`.
  8572. {out}
  8573. Example::
  8574. >>> a = torch.tensor((1, 2))
  8575. >>> b = torch.tensor((0, 1))
  8576. >>> torch.sub(a, b, alpha=2)
  8577. tensor([1, 0])
  8578. """.format(
  8579. **common_args
  8580. ),
  8581. )
  8582. add_docstr(
  8583. torch.subtract,
  8584. r"""
  8585. subtract(input, other, *, alpha=1, out=None) -> Tensor
  8586. Alias for :func:`torch.sub`.
  8587. """,
  8588. )
  8589. add_docstr(
  8590. torch.sum,
  8591. r"""
  8592. sum(input, *, dtype=None) -> Tensor
  8593. Returns the sum of all elements in the :attr:`input` tensor.
  8594. Args:
  8595. {input}
  8596. Keyword args:
  8597. {dtype}
  8598. Example::
  8599. >>> a = torch.randn(1, 3)
  8600. >>> a
  8601. tensor([[ 0.1133, -0.9567, 0.2958]])
  8602. >>> torch.sum(a)
  8603. tensor(-0.5475)
  8604. .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
  8605. :noindex:
  8606. Returns the sum of each row of the :attr:`input` tensor in the given
  8607. dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
  8608. reduce over all of them.
  8609. {keepdim_details}
  8610. Args:
  8611. {input}
  8612. {opt_dim}
  8613. {keepdim}
  8614. Keyword args:
  8615. {dtype}
  8616. Example::
  8617. >>> a = torch.randn(4, 4)
  8618. >>> a
  8619. tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
  8620. [-0.2993, 0.9138, 0.9337, -1.6864],
  8621. [ 0.1132, 0.7892, -0.1003, 0.5688],
  8622. [ 0.3637, -0.9906, -0.4752, -1.5197]])
  8623. >>> torch.sum(a, 1)
  8624. tensor([-0.4598, -0.1381, 1.3708, -2.6217])
  8625. >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
  8626. >>> torch.sum(b, (2, 1))
  8627. tensor([ 435., 1335., 2235., 3135.])
  8628. """.format(
  8629. **multi_dim_common
  8630. ),
  8631. )
  8632. add_docstr(
  8633. torch.nansum,
  8634. r"""
  8635. nansum(input, *, dtype=None) -> Tensor
Returns the sum of all elements, treating NaN (Not a Number) values as zero.
  8637. Args:
  8638. {input}
  8639. Keyword args:
  8640. {dtype}
  8641. Example::
  8642. >>> a = torch.tensor([1., 2., float('nan'), 4.])
  8643. >>> torch.nansum(a)
  8644. tensor(7.)
  8645. .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
  8646. :noindex:
  8647. Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating NaN (Not a Number) values as zero.
  8649. If :attr:`dim` is a list of dimensions, reduce over all of them.
  8650. {keepdim_details}
  8651. Args:
  8652. {input}
  8653. {opt_dim}
  8654. {keepdim}
  8655. Keyword args:
  8656. {dtype}
  8657. Example::
  8658. >>> torch.nansum(torch.tensor([1., float("nan")]))
tensor(1.)
  8660. >>> a = torch.tensor([[1, 2], [3., float("nan")]])
  8661. >>> torch.nansum(a)
  8662. tensor(6.)
  8663. >>> torch.nansum(a, dim=0)
  8664. tensor([4., 2.])
  8665. >>> torch.nansum(a, dim=1)
  8666. tensor([3., 3.])
  8667. """.format(
  8668. **multi_dim_common
  8669. ),
  8670. )
  8671. add_docstr(
  8672. torch.svd,
  8673. r"""
  8674. svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
  8675. Computes the singular value decomposition of either a matrix or batch of
  8676. matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
  8679. and the conjugate transpose of `V` for complex inputs.
  8680. If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
  8681. batched with the same batch dimensions as :attr:`input`.
  8682. If :attr:`some` is `True` (default), the method returns the reduced singular
  8683. value decomposition. In this case, if the last two dimensions of :attr:`input` are
  8684. `m` and `n`, then the returned `U` and `V` matrices will contain only
  8685. `min(n, m)` orthonormal columns.
  8686. If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
  8687. zero-filled matrices of shape `(m, m)` and `(n, n)`
  8688. respectively, and the same device as :attr:`input`. The argument :attr:`some`
  8689. has no effect when :attr:`compute_uv` is `False`.
  8690. Supports :attr:`input` of float, double, cfloat and cdouble data types.
  8691. The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
  8692. always be real-valued, even if :attr:`input` is complex.
  8693. .. warning::
  8694. :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
  8695. and will be removed in a future PyTorch release.
  8696. ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
  8697. .. code:: python
  8698. U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
  8699. V = Vh.mH
  8700. ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
  8701. .. code:: python
  8702. S = torch.linalg.svdvals(A)
  8703. .. note:: Differences with :func:`torch.linalg.svd`:
  8704. * :attr:`some` is the opposite of
  8705. :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
  8706. default value for both is `True`, so the default behavior is
  8707. effectively the opposite.
  8708. * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
  8709. `Vh`, that is, :math:`V^{\text{H}}`.
  8710. * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
  8711. tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
  8712. empty tensors.
  8713. .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
  8714. then the singular values of each matrix in the batch are returned in descending order.
  8715. .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
  8716. .. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
  8717. and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
  8718. can be arbitrary bases of the corresponding subspaces.
  8719. .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
  8720. (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
  8721. on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
  8722. and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
  8723. .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
  8724. be represented as a column-major matrix (i.e. Fortran-contiguous).
.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not
have zero or repeated singular values.
  8727. .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
`U` and `V` will be numerically unstable, as they depend on
  8729. :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
  8730. has small singular values, as these gradients also depend on `S⁻¹`.
  8731. .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
  8732. as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
  8733. The same happens when :attr:`input` has repeated singular values, where one may multiply
  8734. the columns of the spanning subspace in `U` and `V` by a rotation matrix
  8735. and `the resulting vectors will span the same subspace`_.
  8736. Different platforms, like NumPy, or inputs on different device types,
  8737. may produce different `U` and `V` tensors.
  8738. Args:
  8739. input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
  8740. batch dimensions consisting of `(m, n)` matrices.
  8741. some (bool, optional): controls whether to compute the reduced or full decomposition, and
  8742. consequently, the shape of returned `U` and `V`. Default: `True`.
  8743. compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
  8744. Keyword args:
  8745. out (tuple, optional): the output tuple of tensors
  8746. Example::
  8747. >>> a = torch.randn(5, 3)
  8748. >>> a
  8749. tensor([[ 0.2364, -0.7752, 0.6372],
  8750. [ 1.7201, 0.7394, -0.0504],
  8751. [-0.3371, -1.0584, 0.5296],
  8752. [ 0.3550, -0.4022, 1.5569],
  8753. [ 0.2445, -0.0158, 1.1414]])
  8754. >>> u, s, v = torch.svd(a)
  8755. >>> u
  8756. tensor([[ 0.4027, 0.0287, 0.5434],
  8757. [-0.1946, 0.8833, 0.3679],
  8758. [ 0.4296, -0.2890, 0.5261],
  8759. [ 0.6604, 0.2717, -0.2618],
  8760. [ 0.4234, 0.2481, -0.4733]])
  8761. >>> s
  8762. tensor([2.3289, 2.0315, 0.7806])
  8763. >>> v
  8764. tensor([[-0.0199, 0.8766, 0.4809],
  8765. [-0.5080, 0.4054, -0.7600],
  8766. [ 0.8611, 0.2594, -0.4373]])
  8767. >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
  8768. tensor(8.6531e-07)
  8769. >>> a_big = torch.randn(7, 5, 3)
  8770. >>> u, s, v = torch.svd(a_big)
  8771. >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
  8772. tensor(2.6503e-06)
  8773. .. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
  8775. """,
  8776. )
  8777. add_docstr(
  8778. torch.t,
  8779. r"""
  8780. t(input) -> Tensor
Expects :attr:`input` to be a tensor with at most 2 dimensions, and transposes
dimensions 0 and 1.
  8783. 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
  8784. is equivalent to ``transpose(input, 0, 1)``.
  8785. Args:
  8786. {input}
  8787. Example::
  8788. >>> x = torch.randn(())
  8789. >>> x
  8790. tensor(0.1995)
  8791. >>> torch.t(x)
  8792. tensor(0.1995)
  8793. >>> x = torch.randn(3)
  8794. >>> x
  8795. tensor([ 2.4320, -0.4608, 0.7702])
  8796. >>> torch.t(x)
  8797. tensor([ 2.4320, -0.4608, 0.7702])
  8798. >>> x = torch.randn(2, 3)
  8799. >>> x
  8800. tensor([[ 0.4875, 0.9158, -0.5872],
  8801. [ 0.3938, -0.6929, 0.6932]])
  8802. >>> torch.t(x)
  8803. tensor([[ 0.4875, 0.3938],
  8804. [ 0.9158, -0.6929],
  8805. [-0.5872, 0.6932]])
  8806. See also :func:`torch.transpose`.
  8807. """.format(
  8808. **common_args
  8809. ),
  8810. )
  8811. add_docstr(
  8812. torch.flip,
  8813. r"""
  8814. flip(input, dims) -> Tensor
Reverse the order of an n-D tensor along the given axes in :attr:`dims`.
  8816. .. note::
  8817. `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
  8818. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  8819. `torch.flip` is expected to be slower than `np.flip`.
  8820. Args:
  8821. {input}
dims (a list or tuple): axes to flip on
  8823. Example::
  8824. >>> x = torch.arange(8).view(2, 2, 2)
  8825. >>> x
  8826. tensor([[[ 0, 1],
  8827. [ 2, 3]],
  8828. [[ 4, 5],
  8829. [ 6, 7]]])
  8830. >>> torch.flip(x, [0, 1])
  8831. tensor([[[ 6, 7],
  8832. [ 4, 5]],
  8833. [[ 2, 3],
  8834. [ 0, 1]]])
  8835. """.format(
  8836. **common_args
  8837. ),
  8838. )
  8839. add_docstr(
  8840. torch.fliplr,
  8841. r"""
  8842. fliplr(input) -> Tensor
  8843. Flip tensor in the left/right direction, returning a new tensor.
  8844. Flip the entries in each row in the left/right direction.
  8845. Columns are preserved, but appear in a different order than before.
  8846. Note:
  8847. Requires the tensor to be at least 2-D.
  8848. .. note::
  8849. `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
  8850. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  8851. `torch.fliplr` is expected to be slower than `np.fliplr`.
  8852. Args:
  8853. input (Tensor): Must be at least 2-dimensional.
  8854. Example::
  8855. >>> x = torch.arange(4).view(2, 2)
  8856. >>> x
  8857. tensor([[0, 1],
  8858. [2, 3]])
  8859. >>> torch.fliplr(x)
  8860. tensor([[1, 0],
  8861. [3, 2]])
  8862. """.format(
  8863. **common_args
  8864. ),
  8865. )
  8866. add_docstr(
  8867. torch.flipud,
  8868. r"""
  8869. flipud(input) -> Tensor
  8870. Flip tensor in the up/down direction, returning a new tensor.
  8871. Flip the entries in each column in the up/down direction.
  8872. Rows are preserved, but appear in a different order than before.
  8873. Note:
  8874. Requires the tensor to be at least 1-D.
  8875. .. note::
  8876. `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
  8877. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  8878. `torch.flipud` is expected to be slower than `np.flipud`.
  8879. Args:
  8880. input (Tensor): Must be at least 1-dimensional.
  8881. Example::
  8882. >>> x = torch.arange(4).view(2, 2)
  8883. >>> x
  8884. tensor([[0, 1],
  8885. [2, 3]])
  8886. >>> torch.flipud(x)
  8887. tensor([[2, 3],
  8888. [0, 1]])
  8889. """.format(
  8890. **common_args
  8891. ),
  8892. )
  8893. add_docstr(
  8894. torch.roll,
  8895. r"""
  8896. roll(input, shifts, dims=None) -> Tensor
  8897. Roll the tensor :attr:`input` along the given dimension(s). Elements that are
  8898. shifted beyond the last position are re-introduced at the first position. If
  8899. :attr:`dims` is `None`, the tensor will be flattened before rolling and then
  8900. restored to the original shape.
  8901. Args:
  8902. {input}
  8903. shifts (int or tuple of ints): The number of places by which the elements
  8904. of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
  8905. the same size, and each dimension will be rolled by the corresponding
value.
dims (int or tuple of ints): axis or axes along which to roll
  8908. Example::
  8909. >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
  8910. >>> x
  8911. tensor([[1, 2],
  8912. [3, 4],
  8913. [5, 6],
  8914. [7, 8]])
  8915. >>> torch.roll(x, 1)
  8916. tensor([[8, 1],
  8917. [2, 3],
  8918. [4, 5],
  8919. [6, 7]])
  8920. >>> torch.roll(x, 1, 0)
  8921. tensor([[7, 8],
  8922. [1, 2],
  8923. [3, 4],
  8924. [5, 6]])
  8925. >>> torch.roll(x, -1, 0)
  8926. tensor([[3, 4],
  8927. [5, 6],
  8928. [7, 8],
  8929. [1, 2]])
  8930. >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
  8931. tensor([[6, 5],
  8932. [8, 7],
  8933. [2, 1],
  8934. [4, 3]])
  8935. """.format(
  8936. **common_args
  8937. ),
  8938. )
  8939. add_docstr(
  8940. torch.rot90,
  8941. r"""
  8942. rot90(input, k=1, dims=[0,1]) -> Tensor
Rotate an n-D tensor by 90 degrees in the plane specified by the :attr:`dims` axes.
The rotation direction is from the first towards the second axis if :attr:`k` > 0,
and from the second towards the first if :attr:`k` < 0.
  8945. Args:
  8946. {input}
  8947. k (int): number of times to rotate. Default value is 1
  8948. dims (a list or tuple): axis to rotate. Default value is [0, 1]
  8949. Example::
  8950. >>> x = torch.arange(4).view(2, 2)
  8951. >>> x
  8952. tensor([[0, 1],
  8953. [2, 3]])
  8954. >>> torch.rot90(x, 1, [0, 1])
  8955. tensor([[1, 3],
  8956. [0, 2]])
  8957. >>> x = torch.arange(8).view(2, 2, 2)
  8958. >>> x
  8959. tensor([[[0, 1],
  8960. [2, 3]],
  8961. [[4, 5],
  8962. [6, 7]]])
  8963. >>> torch.rot90(x, 1, [1, 2])
  8964. tensor([[[1, 3],
  8965. [0, 2]],
  8966. [[5, 7],
  8967. [4, 6]]])
  8968. """.format(
  8969. **common_args
  8970. ),
  8971. )
  8972. add_docstr(
  8973. torch.take,
  8974. r"""
  8975. take(input, index) -> Tensor
  8976. Returns a new tensor with the elements of :attr:`input` at the given indices.
  8977. The input tensor is treated as if it were viewed as a 1-D tensor. The result
  8978. takes the same shape as the indices.
  8979. Args:
  8980. {input}
index (LongTensor): the indices into the tensor
  8982. Example::
  8983. >>> src = torch.tensor([[4, 3, 5],
  8984. ... [6, 7, 8]])
  8985. >>> torch.take(src, torch.tensor([0, 2, 5]))
  8986. tensor([ 4, 5, 8])
  8987. """.format(
  8988. **common_args
  8989. ),
  8990. )
  8991. add_docstr(
  8992. torch.take_along_dim,
  8993. r"""
take_along_dim(input, indices, dim=None, *, out=None) -> Tensor
  8995. Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
  8996. Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
  8997. are designed to work with this function. See the examples below.
  8998. .. note::
  8999. This function is similar to NumPy's `take_along_axis`.
  9000. See also :func:`torch.gather`.
  9001. Args:
  9002. {input}
  9003. indices (tensor): the indices into :attr:`input`. Must have long dtype.
dim (int, optional): dimension to select along. If ``None``, :attr:`input` is treated as if it were flattened to 1-D.
  9005. Keyword args:
  9006. {out}
  9007. Example::
  9008. >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
  9009. >>> max_idx = torch.argmax(t)
  9010. >>> torch.take_along_dim(t, max_idx)
  9011. tensor([60])
  9012. >>> sorted_idx = torch.argsort(t, dim=1)
  9013. >>> torch.take_along_dim(t, sorted_idx, dim=1)
  9014. tensor([[10, 20, 30],
  9015. [40, 50, 60]])
  9016. """.format(
  9017. **common_args
  9018. ),
  9019. )
  9020. add_docstr(
  9021. torch.tan,
  9022. r"""
  9023. tan(input, *, out=None) -> Tensor
  9024. Returns a new tensor with the tangent of the elements of :attr:`input`.
  9025. .. math::
  9026. \text{out}_{i} = \tan(\text{input}_{i})
  9027. """
  9028. + r"""
  9029. Args:
  9030. {input}
  9031. Keyword args:
  9032. {out}
  9033. Example::
  9034. >>> a = torch.randn(4)
  9035. >>> a
  9036. tensor([-1.2027, -1.7687, 0.4412, -1.3856])
  9037. >>> torch.tan(a)
  9038. tensor([-2.5930, 4.9859, 0.4722, -5.3366])
  9039. """.format(
  9040. **common_args
  9041. ),
  9042. )
  9043. add_docstr(
  9044. torch.tanh,
  9045. r"""
  9046. tanh(input, *, out=None) -> Tensor
  9047. Returns a new tensor with the hyperbolic tangent of the elements
  9048. of :attr:`input`.
  9049. .. math::
  9050. \text{out}_{i} = \tanh(\text{input}_{i})
  9051. """
  9052. + r"""
  9053. Args:
  9054. {input}
  9055. Keyword args:
  9056. {out}
  9057. Example::
  9058. >>> a = torch.randn(4)
  9059. >>> a
  9060. tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
  9061. >>> torch.tanh(a)
  9062. tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
  9063. """.format(
  9064. **common_args
  9065. ),
  9066. )
  9067. add_docstr(
  9068. # torch.softmax doc str. Point this to torch.nn.functional.softmax
  9069. torch.softmax,
  9070. r"""
  9071. softmax(input, dim, *, dtype=None) -> Tensor
  9072. Alias for :func:`torch.nn.functional.softmax`.
  9073. """,
  9074. )
  9075. add_docstr(
  9076. torch.topk,
  9077. r"""
  9078. topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
  9079. Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
  9080. a given dimension.
  9081. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  9082. If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
  9083. A namedtuple of `(values, indices)` is returned with the `values` and
  9084. `indices` of the largest `k` elements of each row of the `input` tensor in the
  9085. given dimension `dim`.
If the boolean option :attr:`sorted` is ``True``, the returned
`k` elements will themselves be sorted.
  9088. Args:
  9089. {input}
  9090. k (int): the k in "top-k"
  9091. dim (int, optional): the dimension to sort along
  9092. largest (bool, optional): controls whether to return largest or
  9093. smallest elements
  9094. sorted (bool, optional): controls whether to return the elements
  9095. in sorted order
  9096. Keyword args:
  9097. out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
  9098. optionally given to be used as output buffers
  9099. Example::
  9100. >>> x = torch.arange(1., 6.)
  9101. >>> x
  9102. tensor([ 1., 2., 3., 4., 5.])
  9103. >>> torch.topk(x, 3)
  9104. torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
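A short illustration of :attr:`dim` and :attr:`largest` with fixed values::
>>> y = torch.tensor([[1., 9., 3.], [4., 5., 6.]])
>>> torch.topk(y, 1, dim=0)
torch.return_types.topk(values=tensor([[4., 9., 6.]]), indices=tensor([[1, 0, 1]]))
>>> torch.topk(y, 1, dim=0, largest=False)
torch.return_types.topk(values=tensor([[1., 5., 3.]]), indices=tensor([[0, 1, 0]]))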
  9105. """.format(
  9106. **common_args
  9107. ),
  9108. )
  9109. add_docstr(
  9110. torch.trace,
  9111. r"""
  9112. trace(input) -> Tensor
  9113. Returns the sum of the elements of the diagonal of the input 2-D matrix.
  9114. Example::
  9115. >>> x = torch.arange(1., 10.).view(3, 3)
  9116. >>> x
  9117. tensor([[ 1., 2., 3.],
  9118. [ 4., 5., 6.],
  9119. [ 7., 8., 9.]])
  9120. >>> torch.trace(x)
  9121. tensor(15.)
  9122. """,
  9123. )
  9124. add_docstr(
  9125. torch.transpose,
  9126. r"""
  9127. transpose(input, dim0, dim1) -> Tensor
  9128. Returns a tensor that is a transposed version of :attr:`input`.
  9129. The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
  9130. If :attr:`input` is a strided tensor then the resulting :attr:`out`
  9131. tensor shares its underlying storage with the :attr:`input` tensor, so
  9132. changing the content of one would change the content of the other.
  9133. If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
  9134. resulting :attr:`out` tensor *does not* share the underlying storage
  9135. with the :attr:`input` tensor.
  9136. If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
  9137. layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
  9138. :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
  9139. both be sparse dimensions. The batch dimensions of a sparse tensor are the
  9140. dimensions preceding the sparse dimensions.
  9141. .. note::
  9142. Transpositions which interchange the sparse dimensions of a `SparseCSR`
  9143. or `SparseCSC` layout tensor will result in the layout changing between
the two options. Transposition of the sparse dimensions of a `SparseBSR`
  9145. or `SparseBSC` layout tensor will likewise generate a result with the
  9146. opposite layout.
  9147. Args:
  9148. {input}
  9149. dim0 (int): the first dimension to be transposed
  9150. dim1 (int): the second dimension to be transposed
  9151. Example::
  9152. >>> x = torch.randn(2, 3)
  9153. >>> x
  9154. tensor([[ 1.0028, -0.9893, 0.5809],
  9155. [-0.1669, 0.7299, 0.4942]])
  9156. >>> torch.transpose(x, 0, 1)
  9157. tensor([[ 1.0028, -0.1669],
  9158. [-0.9893, 0.7299],
  9159. [ 0.5809, 0.4942]])
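Because the result of a strided transpose is a view, writes through it are
visible in the original tensor (continuing the example above)::
>>> y = torch.transpose(x, 0, 1)
>>> y[0, 0] = 42.
>>> x[0, 0]
tensor(42.)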
  9160. See also :func:`torch.t`.
  9161. """.format(
  9162. **common_args
  9163. ),
  9164. )
  9165. add_docstr(
  9166. torch.triangular_solve,
  9167. r"""
  9168. triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
  9169. Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
  9170. and multiple right-hand sides :math:`b`.
  9171. In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
  9172. (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
  9173. `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
  9174. batches of 2D matrices. If the inputs are batches, then returns
batched outputs `X`.
  9176. If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
  9177. :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
  9178. the result may contain `NaN` s.
  9179. Supports input of float, double, cfloat and cdouble data types.
  9180. .. warning::
  9181. :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
  9182. and will be removed in a future PyTorch release.
  9183. :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
  9184. copy of one of the inputs.
  9185. ``X = torch.triangular_solve(B, A).solution`` should be replaced with
  9186. .. code:: python
  9187. X = torch.linalg.solve_triangular(A, B)
  9188. Args:
  9189. b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
:math:`*` is zero or more batch dimensions
  9191. A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
  9192. where :math:`*` is zero or more batch dimensions
  9193. upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
  9194. transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
  9195. and `op(A) = A` if it is ``False``. Default: ``False``.
  9196. unitriangular (bool, optional): whether :math:`A` is unit triangular.
  9197. If True, the diagonal elements of :math:`A` are assumed to be
  9198. 1 and not referenced from :math:`A`. Default: ``False``.
  9199. Keyword args:
  9200. out ((Tensor, Tensor), optional): tuple of two tensors to write
  9201. the output to. Ignored if `None`. Default: `None`.
  9202. Returns:
  9203. A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
  9204. is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
  9205. (or whatever variant of the system of equations, depending on the keyword arguments.)
  9206. Examples::
  9207. >>> A = torch.randn(2, 2).triu()
  9208. >>> A
  9209. tensor([[ 1.1527, -1.0753],
  9210. [ 0.0000, 0.7986]])
  9211. >>> b = torch.randn(2, 3)
  9212. >>> b
  9213. tensor([[-0.0210, 2.3513, -1.5492],
  9214. [ 1.5429, 0.7403, -1.0243]])
  9215. >>> torch.triangular_solve(b, A)
  9216. torch.return_types.triangular_solve(
  9217. solution=tensor([[ 1.7841, 2.9046, -2.5405],
  9218. [ 1.9320, 0.9270, -1.2826]]),
  9219. cloned_coefficient=tensor([[ 1.1527, -1.0753],
  9220. [ 0.0000, 0.7986]]))
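The solution can be verified against the original system (continuing the
example above; :func:`torch.allclose` serves as a numerical check)::
>>> X = torch.triangular_solve(b, A).solution
>>> torch.allclose(torch.mm(A, X), b)
True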
  9221. """,
  9222. )
  9223. add_docstr(
  9224. torch.tril,
  9225. r"""
  9226. tril(input, diagonal=0, *, out=None) -> Tensor
  9227. Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
  9228. :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
  9229. The lower triangular part of the matrix is defined as the elements on and
  9230. below the diagonal.
  9231. The argument :attr:`diagonal` controls which diagonal to consider. If
  9232. :attr:`diagonal` = 0, all elements on and below the main diagonal are
  9233. retained. A positive value includes just as many diagonals above the main
  9234. diagonal, and similarly a negative value excludes just as many diagonals below
  9235. the main diagonal. The main diagonal are the set of indices
  9236. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
  9237. :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9238. """
  9239. + r"""
  9240. Args:
  9241. {input}
  9242. diagonal (int, optional): the diagonal to consider
  9243. Keyword args:
  9244. {out}
  9245. Example::
  9246. >>> a = torch.randn(3, 3)
  9247. >>> a
  9248. tensor([[-1.0813, -0.8619, 0.7105],
  9249. [ 0.0935, 0.1380, 2.2112],
  9250. [-0.3409, -0.9828, 0.0289]])
  9251. >>> torch.tril(a)
  9252. tensor([[-1.0813, 0.0000, 0.0000],
  9253. [ 0.0935, 0.1380, 0.0000],
  9254. [-0.3409, -0.9828, 0.0289]])
  9255. >>> b = torch.randn(4, 6)
  9256. >>> b
  9257. tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
  9258. [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
  9259. [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
  9260. [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
  9261. >>> torch.tril(b, diagonal=1)
  9262. tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
  9263. [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
  9264. [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
  9265. [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
  9266. >>> torch.tril(b, diagonal=-1)
  9267. tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
  9268. [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
  9269. [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
  9270. [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
  9271. """.format(
  9272. **common_args
  9273. ),
  9274. )
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
  9276. # as common args.
  9277. add_docstr(
  9278. torch.tril_indices,
  9279. r"""
  9280. tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
  9281. Returns the indices of the lower triangular part of a :attr:`row`-by-
  9282. :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
  9283. coordinates of all indices and the second row contains column coordinates.
  9284. Indices are ordered based on rows and then columns.
  9285. The lower triangular part of the matrix is defined as the elements on and
  9286. below the diagonal.
  9287. The argument :attr:`offset` controls which diagonal to consider. If
  9288. :attr:`offset` = 0, all elements on and below the main diagonal are
  9289. retained. A positive value includes just as many diagonals above the main
  9290. diagonal, and similarly a negative value excludes just as many diagonals below
  9291. the main diagonal. The main diagonal are the set of indices
  9292. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
  9293. where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9294. .. note::
  9295. When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
  9296. prevent overflow during calculation.
  9297. """
  9298. + r"""
  9299. Args:
  9300. row (``int``): number of rows in the 2-D matrix.
  9301. col (``int``): number of columns in the 2-D matrix.
  9302. offset (``int``): diagonal offset from the main diagonal.
  9303. Default: if not provided, 0.
  9304. Keyword args:
  9305. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9306. Default: if ``None``, ``torch.long``.
  9307. {device}
layout (:class:`torch.layout`, optional): currently only supports ``torch.strided``.
  9309. Example::
  9310. >>> a = torch.tril_indices(3, 3)
  9311. >>> a
  9312. tensor([[0, 1, 1, 2, 2, 2],
  9313. [0, 0, 1, 0, 1, 2]])
  9314. >>> a = torch.tril_indices(4, 3, -1)
  9315. >>> a
  9316. tensor([[1, 2, 2, 3, 3, 3],
  9317. [0, 0, 1, 0, 1, 2]])
  9318. >>> a = torch.tril_indices(4, 3, 1)
  9319. >>> a
  9320. tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
  9321. [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
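The returned coordinates select exactly the elements that :func:`torch.tril`
keeps, so they can be used directly for advanced indexing::
>>> m = torch.arange(9.).view(3, 3)
>>> idx = torch.tril_indices(3, 3)
>>> m[idx[0], idx[1]]
tensor([0., 3., 4., 6., 7., 8.])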
  9322. """.format(
  9323. **factory_common_args
  9324. ),
  9325. )
  9326. add_docstr(
  9327. torch.triu,
  9328. r"""
  9329. triu(input, diagonal=0, *, out=None) -> Tensor
  9330. Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
  9331. :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
  9332. The upper triangular part of the matrix is defined as the elements on and
  9333. above the diagonal.
  9334. The argument :attr:`diagonal` controls which diagonal to consider. If
  9335. :attr:`diagonal` = 0, all elements on and above the main diagonal are
  9336. retained. A positive value excludes just as many diagonals above the main
  9337. diagonal, and similarly a negative value includes just as many diagonals below
  9338. the main diagonal. The main diagonal are the set of indices
  9339. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
  9340. :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9341. """
  9342. + r"""
  9343. Args:
  9344. {input}
  9345. diagonal (int, optional): the diagonal to consider
  9346. Keyword args:
  9347. {out}
  9348. Example::
  9349. >>> a = torch.randn(3, 3)
  9350. >>> a
  9351. tensor([[ 0.2309, 0.5207, 2.0049],
  9352. [ 0.2072, -1.0680, 0.6602],
  9353. [ 0.3480, -0.5211, -0.4573]])
  9354. >>> torch.triu(a)
  9355. tensor([[ 0.2309, 0.5207, 2.0049],
  9356. [ 0.0000, -1.0680, 0.6602],
  9357. [ 0.0000, 0.0000, -0.4573]])
  9358. >>> torch.triu(a, diagonal=1)
  9359. tensor([[ 0.0000, 0.5207, 2.0049],
  9360. [ 0.0000, 0.0000, 0.6602],
  9361. [ 0.0000, 0.0000, 0.0000]])
  9362. >>> torch.triu(a, diagonal=-1)
  9363. tensor([[ 0.2309, 0.5207, 2.0049],
  9364. [ 0.2072, -1.0680, 0.6602],
  9365. [ 0.0000, -0.5211, -0.4573]])
  9366. >>> b = torch.randn(4, 6)
  9367. >>> b
  9368. tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
  9369. [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
  9370. [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
  9371. [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
  9372. >>> torch.triu(b, diagonal=1)
  9373. tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
  9374. [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
  9375. [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
  9376. [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
  9377. >>> torch.triu(b, diagonal=-1)
  9378. tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
  9379. [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
  9380. [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
  9381. [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
  9382. """.format(
  9383. **common_args
  9384. ),
  9385. )
  9386. # docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
  9387. # as common args.
  9388. add_docstr(
  9389. torch.triu_indices,
  9390. r"""
  9391. triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
  9392. Returns the indices of the upper triangular part of a :attr:`row` by
  9393. :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
  9394. coordinates of all indices and the second row contains column coordinates.
  9395. Indices are ordered based on rows and then columns.
  9396. The upper triangular part of the matrix is defined as the elements on and
  9397. above the diagonal.
  9398. The argument :attr:`offset` controls which diagonal to consider. If
  9399. :attr:`offset` = 0, all elements on and above the main diagonal are
  9400. retained. A positive value excludes just as many diagonals above the main
  9401. diagonal, and similarly a negative value includes just as many diagonals below
  9402. the main diagonal. The main diagonal are the set of indices
  9403. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
  9404. where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9405. .. note::
  9406. When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
  9407. prevent overflow during calculation.
  9408. """
  9409. + r"""
  9410. Args:
  9411. row (``int``): number of rows in the 2-D matrix.
  9412. col (``int``): number of columns in the 2-D matrix.
  9413. offset (``int``): diagonal offset from the main diagonal.
  9414. Default: if not provided, 0.
  9415. Keyword args:
  9416. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9417. Default: if ``None``, ``torch.long``.
  9418. {device}
layout (:class:`torch.layout`, optional): currently only supports ``torch.strided``.
  9420. Example::
  9421. >>> a = torch.triu_indices(3, 3)
  9422. >>> a
  9423. tensor([[0, 0, 0, 1, 1, 2],
  9424. [0, 1, 2, 1, 2, 2]])
  9425. >>> a = torch.triu_indices(4, 3, -1)
  9426. >>> a
  9427. tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
  9428. [0, 1, 2, 0, 1, 2, 1, 2, 2]])
  9429. >>> a = torch.triu_indices(4, 3, 1)
  9430. >>> a
  9431. tensor([[0, 0, 1],
  9432. [1, 2, 2]])
  9433. """.format(
  9434. **factory_common_args
  9435. ),
  9436. )
  9437. add_docstr(
  9438. torch.true_divide,
  9439. r"""
  9440. true_divide(dividend, divisor, *, out) -> Tensor
  9441. Alias for :func:`torch.div` with ``rounding_mode=None``.
  9442. """,
  9443. )
  9444. add_docstr(
  9445. torch.trunc,
  9446. r"""
  9447. trunc(input, *, out=None) -> Tensor
  9448. Returns a new tensor with the truncated integer values of
  9449. the elements of :attr:`input`.
  9450. For integer inputs, follows the array-api convention of returning a
  9451. copy of the input tensor.
  9452. Args:
  9453. {input}
  9454. Keyword args:
  9455. {out}
  9456. Example::
  9457. >>> a = torch.randn(4)
  9458. >>> a
  9459. tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
  9460. >>> torch.trunc(a)
  9461. tensor([ 3., 0., -0., -0.])
  9462. """.format(
  9463. **common_args
  9464. ),
  9465. )
  9466. add_docstr(
  9467. torch.fake_quantize_per_tensor_affine,
  9468. r"""
  9469. fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
  9470. Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
  9471. :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
  9480. Args:
  9481. input (Tensor): the input value(s), ``torch.float32`` tensor
  9482. scale (double scalar or ``float32`` Tensor): quantization scale
  9483. zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
  9484. quant_min (int64): lower bound of the quantized domain
  9485. quant_max (int64): upper bound of the quantized domain
  9486. Returns:
  9487. Tensor: A newly fake_quantized ``torch.float32`` tensor
  9488. Example::
  9489. >>> x = torch.randn(4)
  9490. >>> x
  9491. tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
  9492. >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
  9493. tensor([0.1000, 1.0000, 0.4000, 0.0000])
  9494. >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
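The result can be reproduced with ordinary tensor ops as a sanity check of the
formula above (:func:`torch.round` plays the role of ``std::nearby_int``; the
zero point here is 0)::
>>> q = torch.clamp(torch.round(x / 0.1), 0, 255)
>>> q * 0.1
tensor([0.1000, 1.0000, 0.4000, 0.0000])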
  9496. """,
  9497. )
  9498. add_docstr(
  9499. torch.fake_quantize_per_channel_affine,
  9500. r"""
fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
  9502. Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
  9503. :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
  9512. Args:
  9513. input (Tensor): the input value(s), in ``torch.float32``
  9514. scale (Tensor): quantization scale, per channel in ``torch.float32``
  9515. zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
  9516. axis (int32): channel axis
  9517. quant_min (int64): lower bound of the quantized domain
  9518. quant_max (int64): upper bound of the quantized domain
  9519. Returns:
  9520. Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
  9521. Example::
  9522. >>> x = torch.randn(2, 2, 2)
  9523. >>> x
  9524. tensor([[[-0.2525, -0.0466],
  9525. [ 0.3491, -0.2168]],
  9526. [[-0.5906, 1.6258],
  9527. [ 0.6444, -0.0542]]])
  9528. >>> scales = (torch.randn(2) + 1) * 0.05
  9529. >>> scales
  9530. tensor([0.0475, 0.0486])
  9531. >>> zero_points = torch.zeros(2).to(torch.int32)
  9532. >>> zero_points
  9533. tensor([0, 0])
  9534. >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
  9535. tensor([[[0.0000, 0.0000],
  9536. [0.3405, 0.0000]],
  9537. [[0.0000, 1.6134],
  9538. [0.6323, 0.0000]]])
  9539. """,
  9540. )
  9541. add_docstr(
  9542. torch.fix,
  9543. r"""
  9544. fix(input, *, out=None) -> Tensor
  9545. Alias for :func:`torch.trunc`
  9546. """,
  9547. )
  9548. add_docstr(
  9549. torch.unsqueeze,
  9550. r"""
  9551. unsqueeze(input, dim) -> Tensor
  9552. Returns a new tensor with a dimension of size one inserted at the
  9553. specified position.
  9554. The returned tensor shares the same underlying data with this tensor.
  9555. A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
  9556. can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
  9557. applied at :attr:`dim` = ``dim + input.dim() + 1``.
  9558. Args:
  9559. {input}
  9560. dim (int): the index at which to insert the singleton dimension
  9561. Example::
  9562. >>> x = torch.tensor([1, 2, 3, 4])
  9563. >>> torch.unsqueeze(x, 0)
  9564. tensor([[ 1, 2, 3, 4]])
  9565. >>> torch.unsqueeze(x, 1)
  9566. tensor([[ 1],
  9567. [ 2],
  9568. [ 3],
  9569. [ 4]])
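A negative :attr:`dim` follows the rule above; here ``-1`` resolves to
``-1 + x.dim() + 1 = 1``::
>>> torch.unsqueeze(x, -1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])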
  9570. """.format(
  9571. **common_args
  9572. ),
  9573. )
  9574. add_docstr(
  9575. torch.var,
  9576. r"""
  9577. var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
  9578. Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
  9579. can be a single dimension, list of dimensions, or ``None`` to reduce over all
  9580. dimensions.
  9581. The variance (:math:`\sigma^2`) is calculated as
  9582. .. math:: \sigma^2 = \frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
  9583. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  9584. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  9585. the :attr:`correction`.
  9586. """
  9587. + r"""
  9588. {keepdim_details}
  9589. Args:
  9590. {input}
  9591. {opt_dim}
  9592. Keyword args:
  9593. correction (int): difference between the sample size and sample degrees of freedom.
  9594. Defaults to `Bessel's correction`_, ``correction=1``.
  9595. .. versionchanged:: 2.0
  9596. Previously this argument was called ``unbiased`` and was a boolean
  9597. with ``True`` corresponding to ``correction=1`` and ``False`` being
  9598. ``correction=0``.
  9599. {keepdim}
  9600. {out}
  9601. Example:
  9602. >>> a = torch.tensor(
  9603. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  9604. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  9605. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  9606. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  9607. >>> torch.var(a, dim=1, keepdim=True)
  9608. tensor([[1.0631],
  9609. [0.5590],
  9610. [1.4893],
  9611. [0.8258]])
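The same :attr:`correction` semantics apply here; with fixed values the two
estimators are easy to verify by hand (the sum of squared deviations below is 5)::
>>> t = torch.tensor([1., 2., 3., 4.])
>>> torch.var(t) # 5 / 3
tensor(1.6667)
>>> torch.var(t, correction=0) # 5 / 4
tensor(1.2500)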
  9612. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  9613. """.format(
  9614. **multi_dim_common
  9615. ),
  9616. )
  9617. add_docstr(
  9618. torch.var_mean,
  9619. r"""
  9620. var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
  9621. Calculates the variance and mean over the dimensions specified by :attr:`dim`.
  9622. :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
  9623. reduce over all dimensions.
  9624. The variance (:math:`\sigma^2`) is calculated as
  9625. .. math:: \sigma^2 = \frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
  9626. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  9627. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  9628. the :attr:`correction`.
  9629. """
  9630. + r"""
  9631. {keepdim_details}
  9632. Args:
  9633. {input}
  9634. {opt_dim}
  9635. Keyword args:
  9636. correction (int): difference between the sample size and sample degrees of freedom.
  9637. Defaults to `Bessel's correction`_, ``correction=1``.
  9638. .. versionchanged:: 2.0
  9639. Previously this argument was called ``unbiased`` and was a boolean
  9640. with ``True`` corresponding to ``correction=1`` and ``False`` being
  9641. ``correction=0``.
  9642. {keepdim}
  9643. {out}
  9644. Returns:
  9645. A tuple (var, mean) containing the variance and mean.
  9646. Example:
  9647. >>> a = torch.tensor(
  9648. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  9649. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  9650. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  9651. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  9652. >>> torch.var_mean(a, dim=0, keepdim=True)
  9653. (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
  9654. tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
  9655. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  9656. """.format(
  9657. **multi_dim_common
  9658. ),
  9659. )
  9660. add_docstr(
  9661. torch.zeros,
  9662. r"""
  9663. zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  9664. Returns a tensor filled with the scalar value `0`, with the shape defined
  9665. by the variable argument :attr:`size`.
  9666. Args:
  9667. size (int...): a sequence of integers defining the shape of the output tensor.
  9668. Can be a variable number of arguments or a collection like a list or tuple.
  9669. Keyword args:
  9670. {out}
  9671. {dtype}
  9672. {layout}
  9673. {device}
  9674. {requires_grad}
  9675. Example::
  9676. >>> torch.zeros(2, 3)
  9677. tensor([[ 0., 0., 0.],
  9678. [ 0., 0., 0.]])
  9679. >>> torch.zeros(5)
  9680. tensor([ 0., 0., 0., 0., 0.])
  9681. """.format(
  9682. **factory_common_args
  9683. ),
  9684. )
  9685. add_docstr(
  9686. torch.zeros_like,
  9687. r"""
  9688. zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
  9689. Returns a tensor filled with the scalar value `0`, with the same size as
  9690. :attr:`input`. ``torch.zeros_like(input)`` is equivalent to
  9691. ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
  9692. .. warning::
  9693. As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
  9694. the old ``torch.zeros_like(input, out=output)`` is equivalent to
  9695. ``torch.zeros(input.size(), out=output)``.
  9696. Args:
  9697. {input}
  9698. Keyword args:
  9699. {dtype}
  9700. {layout}
  9701. {device}
  9702. {requires_grad}
  9703. {memory_format}
  9704. Example::
  9705. >>> input = torch.empty(2, 3)
  9706. >>> torch.zeros_like(input)
  9707. tensor([[ 0., 0., 0.],
  9708. [ 0., 0., 0.]])
  9709. """.format(
  9710. **factory_like_common_args
  9711. ),
  9712. )
  9713. add_docstr(
  9714. torch.empty,
  9715. """
  9716. empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
  9717. memory_format=torch.contiguous_format) -> Tensor
  9718. Returns a tensor filled with uninitialized data. The shape of the tensor is
  9719. defined by the variable argument :attr:`size`.
  9720. Args:
  9721. size (int...): a sequence of integers defining the shape of the output tensor.
  9722. Can be a variable number of arguments or a collection like a list or tuple.
  9723. Keyword args:
  9724. {out}
  9725. {dtype}
  9726. {layout}
  9727. {device}
  9728. {requires_grad}
  9729. {pin_memory}
  9730. {memory_format}
  9731. Example::
  9732. >>> torch.empty((2,3), dtype=torch.int64)
  9733. tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
  9734. [ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
  9735. """.format(
  9736. **factory_common_args
  9737. ),
  9738. )
  9739. add_docstr(
  9740. torch.empty_like,
  9741. r"""
  9742. empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
  9743. Returns an uninitialized tensor with the same size as :attr:`input`.
  9744. ``torch.empty_like(input)`` is equivalent to
  9745. ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
  9746. Args:
  9747. {input}
  9748. Keyword args:
  9749. {dtype}
  9750. {layout}
  9751. {device}
  9752. {requires_grad}
  9753. {memory_format}
  9754. Example::
>>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
  9756. >>> torch.empty_like(a)
  9757. tensor([[0, 0, 0],
  9758. [0, 0, 0]], device='cuda:0', dtype=torch.int32)
  9759. """.format(
  9760. **factory_like_common_args
  9761. ),
  9762. )
  9763. add_docstr(
  9764. torch.empty_strided,
  9765. r"""
  9766. empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
  9767. Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
  9768. .. warning::
  9769. If the constructed tensor is "overlapped" (with multiple indices referring to the same element
  9770. in memory) its behavior is undefined.
  9771. Args:
  9772. size (tuple of int): the shape of the output tensor
  9773. stride (tuple of int): the strides of the output tensor
  9774. Keyword args:
  9775. {dtype}
  9776. {layout}
  9777. {device}
  9778. {requires_grad}
  9779. {pin_memory}
  9780. Example::
  9781. >>> a = torch.empty_strided((2, 3), (1, 2))
  9782. >>> a
  9783. tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
  9784. [0.0000e+00, 0.0000e+00, 3.0705e-41]])
  9785. >>> a.stride()
  9786. (1, 2)
  9787. >>> a.size()
  9788. torch.Size([2, 3])
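Each element ``[i, j]`` of the result is stored at offset
``i * stride[0] + j * stride[1]`` in the underlying storage. Viewing
already-initialized data with :meth:`~torch.Tensor.as_strided` makes the same
layout visible::
>>> t = torch.arange(6.)
>>> t.as_strided((2, 3), (1, 2))
tensor([[0., 2., 4.],
[1., 3., 5.]])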
  9789. """.format(
  9790. **factory_common_args
  9791. ),
  9792. )
  9793. add_docstr(
  9794. torch.full,
  9795. r"""
  9796. full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  9797. Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
  9798. tensor's dtype is inferred from :attr:`fill_value`.
  9799. Args:
  9800. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  9801. shape of the output tensor.
  9802. fill_value (Scalar): the value to fill the output tensor with.
  9803. Keyword args:
  9804. {out}
  9805. {dtype}
  9806. {layout}
  9807. {device}
  9808. {requires_grad}
  9809. Example::
  9810. >>> torch.full((2, 3), 3.141592)
  9811. tensor([[ 3.1416, 3.1416, 3.1416],
  9812. [ 3.1416, 3.1416, 3.1416]])
  9813. """.format(
  9814. **factory_common_args
  9815. ),
  9816. )
add_docstr(
    torch.full_like,
    """
full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor

Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.

Args:
    {input}
    fill_value: the number to fill the output tensor with.

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}
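
Example::

    >>> # Illustrative sketch (not from the upstream docs; values assumed):
    >>> a = torch.arange(4)
    >>> torch.full_like(a, 7)
    tensor([7, 7, 7, 7])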
  9834. """.format(
  9835. **factory_like_common_args
  9836. ),
  9837. )
add_docstr(
    torch.det,
    r"""
det(input) -> Tensor

Alias for :func:`torch.linalg.det`
""",
)

add_docstr(
    torch.where,
    r"""
where(condition, input, other, *, out=None) -> Tensor

Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

The operation is defined as:

.. math::
    \text{out}_i = \begin{cases}
        \text{input}_i & \text{if } \text{condition}_i \\
        \text{other}_i & \text{otherwise} \\
    \end{cases}
"""
    + r"""
.. note::
    The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

Arguments:
    condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
    input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
        where :attr:`condition` is ``True``
    other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
        where :attr:`condition` is ``False``

Keyword args:
    {out}

Returns:
    Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

Example::

    >>> x = torch.randn(3, 2)
    >>> y = torch.ones(3, 2)
    >>> x
    tensor([[-0.4620,  0.3139],
            [ 0.3898, -0.7197],
            [ 0.0478, -0.1657]])
    >>> torch.where(x > 0, 1.0, 0.0)
    tensor([[0., 1.],
            [1., 0.],
            [1., 0.]])
    >>> torch.where(x > 0, x, y)
    tensor([[ 1.0000,  0.3139],
            [ 0.3898,  1.0000],
            [ 0.0478,  1.0000]])
    >>> x = torch.randn(2, 2, dtype=torch.double)
    >>> x
    tensor([[ 1.0779,  0.0383],
            [-0.8785, -1.1089]], dtype=torch.float64)
    >>> torch.where(x > 0, x, 0.)
    tensor([[1.0779, 0.0383],
            [0.0000, 0.0000]], dtype=torch.float64)

.. function:: where(condition) -> tuple of LongTensor
   :noindex:

``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.

.. note::
    See also :func:`torch.nonzero`.
""".format(
        **common_args
    ),
)

add_docstr(
    torch.logdet,
    r"""
logdet(input) -> Tensor

Calculates the log determinant of a square matrix or batches of square matrices.

It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
a negative determinant.

.. note::
    Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
    is not invertible. In this case, double backward through :meth:`logdet` will
    be unstable when :attr:`input` doesn't have distinct singular values. See
    :func:`torch.linalg.svd` for details.

.. seealso::

    :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
    absolute value of the determinant of real-valued (resp. complex) square matrices.

Arguments:
    input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
        batch dimensions.

Example::

    >>> A = torch.randn(3, 3)
    >>> torch.det(A)
    tensor(0.2611)
    >>> torch.logdet(A)
    tensor(-1.3430)
    >>> A = torch.randn(3, 2, 2)
    >>> A
    tensor([[[ 0.9254, -0.6213],
             [-0.5787,  1.6843]],

            [[ 0.3242, -0.9665],
             [ 0.4539, -0.0887]],

            [[ 1.1336, -0.4025],
             [-0.7089,  0.9032]]])
    >>> A.det()
    tensor([1.1990, 0.4099, 0.7386])
    >>> A.det().log()
    tensor([ 0.1815, -0.8917, -0.3031])
""",
)

add_docstr(
    torch.slogdet,
    r"""
slogdet(input) -> (Tensor, Tensor)

Alias for :func:`torch.linalg.slogdet`
""",
)

add_docstr(
    torch.pinverse,
    r"""
pinverse(input, rcond=1e-15) -> Tensor

Alias for :func:`torch.linalg.pinv`
""",
)

add_docstr(
    torch.hann_window,
    """
hann_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Hann window function.

.. math::
    w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
            \sin^2 \left( \frac{\pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
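
Example::

    >>> # Illustrative sketch (not from the upstream docs): the stated identity
    >>> # between periodic and symmetric windows, for an assumed L = 8.
    >>> torch.allclose(torch.hann_window(8, periodic=True),
    ...                torch.hann_window(9, periodic=False)[:-1])
    True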
  9989. """.format(
  9990. **factory_common_args
  9991. ),
  9992. )
add_docstr(
    torch.hamming_window,
    """
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Hamming window function.

.. math::
    w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

.. note::
    This is a generalized version of :meth:`torch.hann_window`.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.
    alpha (float, optional): The coefficient :math:`\alpha` in the equation above
    beta (float, optional): The coefficient :math:`\beta` in the equation above

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window.
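
Example::

    >>> # Illustrative sketch (not from the upstream docs), computed from the
    >>> # formula above with the default alpha=0.54, beta=0.46 and an assumed
    >>> # window_length of 4:
    >>> torch.hamming_window(4, periodic=False)
    tensor([0.0800, 0.7700, 0.7700, 0.0800])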
  10032. """.format(
  10033. **factory_common_args
  10034. ),
  10035. )
add_docstr(
    torch.bartlett_window,
    """
bartlett_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Bartlett window function.

.. math::
    w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
        \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
        2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
    \end{cases},

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
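
Example::

    >>> # Illustrative sketch (not from the upstream docs), computed from the
    >>> # formula above for an assumed window_length of 5:
    >>> torch.bartlett_window(5, periodic=False)
    tensor([0.0000, 0.5000, 1.0000, 0.5000, 0.0000])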
  10074. """.format(
  10075. **factory_common_args
  10076. ),
  10077. )
add_docstr(
    torch.blackman_window,
    """
blackman_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Blackman window function.

.. math::
    w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
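
Example::

    >>> # Illustrative sketch (not from the upstream docs): the stated identity
    >>> # between periodic and symmetric windows, for an assumed L = 8.
    >>> torch.allclose(torch.blackman_window(8, periodic=True),
    ...                torch.blackman_window(9, periodic=False)[:-1])
    True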
  10113. """.format(
  10114. **factory_common_args
  10115. ),
  10116. )
add_docstr(
    torch.kaiser_window,
    """
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.

Let :math:`I_0` be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:

.. math::
    out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )

Calling ``torch.kaiser_window(L, periodic=True, beta=B)`` is equivalent to calling
``torch.kaiser_window(L + 1, periodic=False, beta=B)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.

.. note::
    If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
"""
    + r"""
Args:
    window_length (int): length of the window.
    periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
        If False, returns a symmetric window suitable for use in filter design.
    beta (float, optional): shape parameter for the window.

Keyword args:
    {dtype}
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}
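
Example::

    >>> # Illustrative sketch (not from the upstream docs): the equivalence
    >>> # stated above, for an assumed L = 10 and the default beta = 12.0.
    >>> torch.allclose(torch.kaiser_window(10, periodic=True),
    ...                torch.kaiser_window(11, periodic=False)[:-1])
    True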
  10149. """.format(
  10150. **factory_common_args
  10151. ),
  10152. )
add_docstr(
    torch.vander,
    """
vander(x, N=None, increasing=False) -> Tensor
"""
    + r"""
Generates a Vandermonde matrix.

The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.

Arguments:
    x (Tensor): 1-D input tensor.
    N (int, optional): Number of columns in the output. If N is not specified,
        a square array is returned :math:`(N = len(x))`.
    increasing (bool, optional): Order of the powers of the columns. If True,
        the powers increase from left to right, if False (the default) they are reversed.

Returns:
    Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
        the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
        are :math:`x^0, x^1, ..., x^{{(N-1)}}`.

Example::

    >>> x = torch.tensor([1, 2, 3, 5])
    >>> torch.vander(x)
    tensor([[  1,   1,   1,   1],
            [  8,   4,   2,   1],
            [ 27,   9,   3,   1],
            [125,  25,   5,   1]])
    >>> torch.vander(x, N=3)
    tensor([[ 1,  1,  1],
            [ 4,  2,  1],
            [ 9,  3,  1],
            [25,  5,  1]])
    >>> torch.vander(x, N=3, increasing=True)
    tensor([[ 1,  1,  1],
            [ 1,  2,  4],
            [ 1,  3,  9],
            [ 1,  5, 25]])
""".format(
        **factory_common_args
    ),
)

add_docstr(
    torch.unbind,
    r"""
unbind(input, dim=0) -> seq

Removes a tensor dimension.

Returns a tuple of all slices along a given dimension, already without it.

Arguments:
    input (Tensor): the tensor to unbind
    dim (int): dimension to remove

Example::

    >>> torch.unbind(torch.tensor([[1, 2, 3],
    ...                            [4, 5, 6],
    ...                            [7, 8, 9]]))
    (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
""",
)

add_docstr(
    torch.combinations,
    r"""
combinations(input, r=2, with_replacement=False) -> seq

Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.

Arguments:
    input (Tensor): 1D vector.
    r (int, optional): number of elements to combine
    with_replacement (bool, optional): whether to allow duplication in combination

Returns:
    Tensor: A tensor equivalent to converting all the input tensors into lists,
        doing `itertools.combinations` or `itertools.combinations_with_replacement` on these
        lists, and finally converting the resulting list into a tensor.

Example::

    >>> import itertools
    >>> a = [1, 2, 3]
    >>> list(itertools.combinations(a, r=2))
    [(1, 2), (1, 3), (2, 3)]
    >>> list(itertools.combinations(a, r=3))
    [(1, 2, 3)]
    >>> list(itertools.combinations_with_replacement(a, r=2))
    [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
    >>> tensor_a = torch.tensor(a)
    >>> torch.combinations(tensor_a)
    tensor([[1, 2],
            [1, 3],
            [2, 3]])
    >>> torch.combinations(tensor_a, r=3)
    tensor([[1, 2, 3]])
    >>> torch.combinations(tensor_a, with_replacement=True)
    tensor([[1, 1],
            [1, 2],
            [1, 3],
            [2, 2],
            [2, 3],
            [3, 3]])
""",
)

add_docstr(
    torch.trapezoid,
    r"""
trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
:attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.

Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
the default computation is

.. math::
    \begin{aligned}
        \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
    \end{aligned}

When :attr:`dx` is specified the computation becomes

.. math::
    \begin{aligned}
        \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
    \end{aligned}

effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
assuming :attr:`x` is also a one-dimensional tensor with
elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes

.. math::
    \begin{aligned}
        \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
    \end{aligned}

When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
and :attr:`y`, the function computes the difference between consecutive elements along
dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
the same shape as the original tensors except their lengths along the dimension :attr:`dim` are reduced by 1.
After that, those two tensors are broadcast together to compute the final output as part of the trapezoidal rule.
See the examples below for details.

.. note::
    The trapezoidal rule is a technique for approximating the definite integral of a function
    by averaging its left and right Riemann sums. The approximation becomes more accurate as
    the resolution of the partition increases.

Arguments:
    y (Tensor): Values to use when computing the trapezoidal rule.
    x (Tensor): If specified, defines spacing between values as specified above.

Keyword arguments:
    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
        is specified then this defaults to 1. Effectively multiplies the result by its value.
    dim (int): The dimension along which to compute the trapezoidal rule.
        The last (inner-most) dimension by default.

Examples::

    >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
    >>> y = torch.tensor([1, 5, 10])
    >>> torch.trapezoid(y)
    tensor(10.5)
    >>> # Computes the same trapezoidal rule directly to verify
    >>> (1 + 10 + 10) / 2
    10.5
    >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
    >>> # NOTE: the result is the same as before, but multiplied by 2
    >>> torch.trapezoid(y, dx=2)
    tensor(21.0)
    >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.trapezoid(y, x)
    tensor(28.5)
    >>> # Computes the same trapezoidal rule directly to verify
    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
    28.5
    >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
    >>> y = torch.arange(9).reshape(3, 3)
    >>> y
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> torch.trapezoid(y)
    tensor([ 2.,  8., 14.])
    >>> # Computes the trapezoidal rule for each column of the matrix
    >>> torch.trapezoid(y, dim=0)
    tensor([ 6.,  8., 10.])
    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with the same arbitrary spacing
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.trapezoid(y, x)
    tensor([5., 5., 5.])
    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with different arbitrary spacing per row
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
    >>> torch.trapezoid(y, x)
    tensor([2., 4., 6.])
""",
)

add_docstr(
    torch.trapz,
    r"""
trapz(y, x, *, dim=-1) -> Tensor

Alias for :func:`torch.trapezoid`.
""",
)

add_docstr(
    torch.cumulative_trapezoid,
    r"""
cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.

For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
and this function is that :func:`torch.trapezoid` returns a value for each integration,
whereas this function returns a cumulative value for every spacing within the integration. This
is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.

Arguments:
    y (Tensor): Values to use when computing the trapezoidal rule.
    x (Tensor): If specified, defines spacing between values as specified above.

Keyword arguments:
    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
        is specified then this defaults to 1. Effectively multiplies the result by its value.
    dim (int): The dimension along which to compute the trapezoidal rule.
        The last (inner-most) dimension by default.

Examples::

    >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
    >>> y = torch.tensor([1, 5, 10])
    >>> torch.cumulative_trapezoid(y)
    tensor([ 3., 10.5])
    >>> # Computes the same trapezoidal rule directly up to each element to verify
    >>> (1 + 5) / 2
    3.0
    >>> (1 + 10 + 10) / 2
    10.5
    >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
    >>> # NOTE: the result is the same as before, but multiplied by 2
    >>> torch.cumulative_trapezoid(y, dx=2)
    tensor([ 6., 21.])
    >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.cumulative_trapezoid(y, x)
    tensor([ 6., 28.5])
    >>> # Computes the same trapezoidal rule directly up to each element to verify
    >>> ((3 - 1) * (1 + 5)) / 2
    6.0
    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
    28.5
    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
    >>> y = torch.arange(9).reshape(3, 3)
    >>> y
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> torch.cumulative_trapezoid(y)
    tensor([[ 0.5,  2.],
            [ 3.5,  8.],
            [ 6.5, 14.]])
    >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
    >>> torch.cumulative_trapezoid(y, dim=0)
    tensor([[ 1.5,  2.5,  3.5],
            [ 6.0,  8.0, 10.0]])
    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with the same arbitrary spacing
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.cumulative_trapezoid(y, x)
    tensor([[2., 5.],
            [2., 5.],
            [2., 5.]])
    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with different arbitrary spacing per row
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
    >>> torch.cumulative_trapezoid(y, x)
    tensor([[1., 2.],
            [2., 4.],
            [3., 6.]])
""",
)

add_docstr(
    torch.repeat_interleave,
    r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor

Repeat elements of a tensor.

.. warning::

    This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.

Args:
    {input}
    repeats (Tensor or int): The number of repetitions for each element.
        repeats is broadcasted to fit the shape of the given axis.
    dim (int, optional): The dimension along which to repeat values.
        By default, use the flattened input array, and return a flat output
        array.

Keyword args:
    output_size (int, optional): Total output size for the given axis
        (e.g. sum of repeats). If given, it will avoid stream synchronization
        needed to calculate the output shape of the tensor.

Returns:
    Tensor: Repeated tensor which has the same shape as input, except along the given axis.

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.repeat_interleave(2)
    tensor([1, 1, 2, 2, 3, 3])
    >>> y = torch.tensor([[1, 2], [3, 4]])
    >>> torch.repeat_interleave(y, 2)
    tensor([1, 1, 2, 2, 3, 3, 4, 4])
    >>> torch.repeat_interleave(y, 3, dim=1)
    tensor([[1, 1, 1, 2, 2, 2],
            [3, 3, 3, 4, 4, 4]])
    >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
    tensor([[1, 2],
            [3, 4],
            [3, 4]])
    >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
    tensor([[1, 2],
            [3, 4],
            [3, 4]])

.. function:: repeat_interleave(repeats, *, output_size=None) -> Tensor
   :noindex:

If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
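
Example::

    >>> # Illustrative sketch (not from the upstream docs) of the repeats-only
    >>> # overload described above:
    >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
    tensor([0, 1, 1, 2, 2, 2])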
  10461. """.format(
  10462. **common_args
  10463. ),
  10464. )
add_docstr(
    torch.tile,
    r"""
tile(input, dims) -> Tensor

Constructs a tensor by repeating the elements of :attr:`input`.
The :attr:`dims` argument specifies the number of repetitions
in each dimension.

If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
ones are prepended to :attr:`dims` until all dimensions are specified.
For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).

Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
specifies, then :attr:`input` is treated as if it were unsqueezed at
dimension zero until it has as many dimensions as :attr:`dims` specifies.
For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
shape (1, 1, 4, 2).

.. note::

    This function is similar to NumPy's tile function.

Args:
    input (Tensor): the tensor whose elements to repeat.
    dims (tuple): the number of repetitions per dimension.

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.tile((2,))
    tensor([1, 2, 3, 1, 2, 3])
    >>> y = torch.tensor([[1, 2], [3, 4]])
    >>> torch.tile(y, (2, 2))
    tensor([[1, 2, 1, 2],
            [3, 4, 3, 4],
            [1, 2, 1, 2],
            [3, 4, 3, 4]])
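    >>> # Illustrative sketch (not from the upstream docs): fewer entries in
    >>> # dims than input dimensions, per the prepending rule described above.
    >>> z = torch.ones(2, 2, 2)
    >>> torch.tile(z, (2,)).shape
    torch.Size([2, 2, 4])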
  10497. """,
  10498. )
add_docstr(
    torch.quantize_per_tensor,
    r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor

Converts a float tensor to a quantized tensor with given scale and zero point.

Arguments:
    input (Tensor): float tensor or list of tensors to quantize
    scale (float or Tensor): scale to apply in quantization formula
    zero_point (int or Tensor): offset in integer value that maps to float zero
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

Returns:
    Tensor: A newly quantized tensor or list of quantized tensors.

Example::

    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
    tensor([ 0, 10, 20, 30], dtype=torch.uint8)
    >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
    ...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
    (tensor([-1.,  0.], size=(2,), dtype=torch.quint8,
        quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
     tensor([-2.,  2.], size=(2,), dtype=torch.quint8,
        quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
""",
)

add_docstr(
    torch.quantize_per_tensor_dynamic,
    r"""
quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor

Converts a float tensor to a quantized tensor with scale and zero_point calculated
dynamically based on the input.

Arguments:
    input (Tensor): float tensor or list of tensors to quantize
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
    reduce_range (bool): a flag to indicate whether to reduce the range of quantized
        data by 1 bit; this is required to avoid instruction overflow for some hardware

Returns:
    Tensor: A newly (dynamically) quantized tensor

Example::

    >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
    >>> print(t)
    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
           zero_point=85)
    >>> t.int_repr()
    tensor([  0,  85, 170, 255], dtype=torch.uint8)
""",
)

add_docstr(
    torch.quantize_per_channel,
    r"""
quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor

Converts a float tensor to a per-channel quantized tensor with given scales and zero points.

Arguments:
    input (Tensor): float tensor to quantize
    scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
    zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
    axis (int): dimension on which to apply per-channel quantization
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

Returns:
    Tensor: A newly quantized tensor

Example::

    >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
    >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
    tensor([[-1.,  0.],
            [ 1.,  2.]], size=(2, 2), dtype=torch.quint8,
           quantization_scheme=torch.per_channel_affine,
           scale=tensor([0.1000, 0.0100], dtype=torch.float64),
           zero_point=tensor([10,  0]), axis=0)
    >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
    tensor([[  0,  10],
            [100, 200]], dtype=torch.uint8)
""",
)

add_docstr(
    torch.quantized_batch_norm,
    r"""
quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor

Applies batch normalization on a 4D (NCHW) quantized tensor.

.. math::

    y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

Arguments:
    input (Tensor): quantized tensor
    weight (Tensor): float tensor that corresponds to the gamma, size C
    bias (Tensor): float tensor that corresponds to the beta, size C
    mean (Tensor): float mean value in batch normalization, size C
    var (Tensor): float tensor for variance, size C
    eps (float): a value added to the denominator for numerical stability.
    output_scale (float): output quantized tensor scale
    output_zero_point (int): output quantized tensor zero_point

Returns:
    Tensor: A quantized tensor with batch normalization applied.

Example::

    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
    >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
    tensor([[[[-0.2000, -0.2000],
              [ 1.6000, -0.2000]],

             [[-0.4000, -0.4000],
              [-0.4000,  0.6000]]],


            [[[-0.2000, -0.2000],
              [-0.2000, -0.2000]],

             [[ 0.6000, -0.4000],
              [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
""",
)

add_docstr(
    torch.quantized_max_pool1d,
    r"""
quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor

Applies a 1D max pooling over an input quantized tensor composed of several input planes.

Arguments:
    input (Tensor): quantized tensor
    kernel_size (list of int): the size of the sliding window
    stride (``list of int``, optional): the stride of the sliding window
    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
    dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
    ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
        Defaults to False.

Returns:
    Tensor: A quantized tensor with max_pool1d applied.

Example::

    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
    >>> torch.quantized_max_pool1d(qx, [2])
    tensor([[0.0000],
            [1.5000]], size=(2, 1), dtype=torch.quint8,
        quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""",
)

add_docstr(
    torch.quantized_max_pool2d,
    r"""
quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor

Applies a 2D max pooling over an input quantized tensor composed of several input planes.

Arguments:
    input (Tensor): quantized tensor
    kernel_size (``list of int``): the size of the sliding window
    stride (``list of int``, optional): the stride of the sliding window
    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
    dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
    ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
        Defaults to False.

Returns:
    Tensor: A quantized tensor with max_pool2d applied.

Example::

    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
    >>> torch.quantized_max_pool2d(qx, [2,2])
    tensor([[[[1.5000]],

             [[1.5000]]],


            [[[0.0000]],

             [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
        quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""",
)

add_docstr(
    torch.Generator,
    r"""
Generator(device='cpu') -> Generator

Creates and returns a generator object that manages the state of the algorithm which
produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
functions.

Arguments:
    device (:class:`torch.device`, optional): the desired device for the generator.

Returns:
    Generator: A torch.Generator object.

Example::

    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
    >>> g_cpu = torch.Generator()
    >>> g_cuda = torch.Generator(device='cuda')
""",
)

add_docstr(
    torch.Generator.set_state,
    r"""
Generator.set_state(new_state) -> void

Sets the Generator state.

Arguments:
    new_state (torch.ByteTensor): The desired state.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu_other = torch.Generator()
    >>> g_cpu.set_state(g_cpu_other.get_state())
""",
)

add_docstr(
    torch.Generator.get_state,
    r"""
Generator.get_state() -> Tensor

Returns the Generator state as a ``torch.ByteTensor``.

Returns:
    Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
        to restore a Generator to a specific point in time.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.get_state()
""",
)

add_docstr(
    torch.Generator.manual_seed,
    r"""
Generator.manual_seed(seed) -> Generator

Sets the seed for generating random numbers. Returns a `torch.Generator` object.
It is recommended to set a large seed, i.e. a number that has a good balance of 0
and 1 bits. Avoid having many 0 bits in the seed.

Arguments:
    seed (int): The desired seed. Value must be within the inclusive range
        `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
        is raised. Negative inputs are remapped to positive values with the formula
        `0xffff_ffff_ffff_ffff + seed`.

Returns:
    Generator: A torch.Generator object.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.manual_seed(2147483647)
""",
)

add_docstr(
    torch.Generator.initial_seed,
    r"""
Generator.initial_seed() -> int

Returns the initial seed for generating random numbers.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.initial_seed()
    2147483647
""",
)

add_docstr(
    torch.Generator.seed,
    r"""
Generator.seed() -> int

Gets a non-deterministic random number from std::random_device or the current
time and uses it to seed a Generator.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.seed()
    1516516984916
""",
)

add_docstr(
    torch.Generator.device,
    r"""
Generator.device -> device

Gets the current device of the generator.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.device
    device(type='cpu')
""",
)

add_docstr(
    torch._assert_async,
    r"""
_assert_async(tensor) -> void

Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
CUDA tensors, we DO NOT synchronize and you may only find out the assertion
failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
testing invariants in CUDA tensors without giving up performance. This function
is NOT intended to be used for regular error checking, as it will trash your CUDA
context if the assert fails (forcing you to restart your PyTorch process).

Args:
    tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
        elements (including False for boolean tensors) cause an assertion failure
        to be raised.
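
Example::

    >>> # Illustrative sketch (not from the upstream docs): a nonzero one-element
    >>> # tensor passes; a zero would trigger the (possibly asynchronous) failure.
    >>> torch._assert_async(torch.tensor(1.0))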
  10771. """,
  10772. )
add_docstr(
    torch.searchsorted,
    r"""
searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor

Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. If :attr:`right` is False or :attr:`side` is
'left' (default), then the left boundary of :attr:`sorted_sequence` is closed. More formally,
the returned index satisfies the following rules:

.. list-table::
   :widths: 12 10 78
   :header-rows: 1

   * - :attr:`sorted_sequence`
     - :attr:`right`
     - *returned index satisfies*
   * - 1-D
     - False
     - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
   * - 1-D
     - True
     - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
   * - N-D
     - False
     - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
   * - N-D
     - True
     - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``

Args:
    sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
        dimension unless :attr:`sorter` is provided, in which case the sequence does not
        need to be sorted
    values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).

Keyword args:
    out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
        Default value is False, i.e. default output data type is torch.int64.
    right (bool, optional): if False, return the first suitable location that is found. If True, return the
        last such index. If no suitable index is found, return 0 for non-numerical values
        (eg. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
        (one past the last index of the *innermost* dimension). In other words, if False,
        gets the lower bound index for each value in :attr:`values` on the corresponding
        *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
        bound index instead. Default value is False. :attr:`side` does the same and is
        preferred. It will error if :attr:`side` is set to "left" while this is True.
    side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
        and "right" corresponds to True for :attr:`right`. It will error if this is set to
        "left" while :attr:`right` is True.
    out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
    sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
        :attr:`sorted_sequence` containing a sequence of indices that sort it in
        ascending order on the innermost dimension

Example::

    >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
    >>> sorted_sequence
    tensor([[ 1,  3,  5,  7,  9],
            [ 2,  4,  6,  8, 10]])
    >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
    >>> values
    tensor([[3, 6, 9],
            [3, 6, 9]])
    >>> torch.searchsorted(sorted_sequence, values)
    tensor([[1, 3, 4],
            [1, 2, 4]])
    >>> torch.searchsorted(sorted_sequence, values, side='right')
    tensor([[2, 3, 5],
            [1, 3, 4]])
    >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
    >>> sorted_sequence_1d
    tensor([1, 3, 5, 7, 9])
    >>> torch.searchsorted(sorted_sequence_1d, values)
    tensor([[1, 3, 4],
            [1, 3, 4]])
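    >>> # Illustrative sketch (not from the upstream docs): using ``sorter`` with
    >>> # an unsorted sequence instead of sorting it first.
    >>> unsorted_1d = torch.tensor([5, 1, 9, 3, 7])
    >>> sorter = torch.argsort(unsorted_1d)
    >>> torch.searchsorted(unsorted_1d, torch.tensor([4, 8]), sorter=sorter)
    tensor([2, 4])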
  10845. """,
  10846. )
add_docstr(
    torch.bucketize,
    r"""
bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor

Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is closed. More
formally, the returned index satisfies the following rules:

.. list-table::
   :widths: 15 85
   :header-rows: 1

   * - :attr:`right`
     - *returned index satisfies*
   * - False
     - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
   * - True
     - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``

Args:
    input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
    boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.

Keyword args:
    out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
        Default value is False, i.e. default output data type is torch.int64.
    right (bool, optional): if False, return the first suitable location that is found. If True, return the
        last such index. If no suitable index is found, return 0 for non-numerical values
        (eg. nan, inf) or the size of :attr:`boundaries` (one past the last index).
        In other words, if False, gets the lower bound index for each value in :attr:`input`
        from :attr:`boundaries`. If True, gets the upper bound index instead.
        Default value is False.
    out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.

Example::

    >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
    >>> boundaries
    tensor([1, 3, 5, 7, 9])
    >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
    >>> v
    tensor([[3, 6, 9],
            [3, 6, 9]])
    >>> torch.bucketize(v, boundaries)
    tensor([[1, 3, 4],
            [1, 3, 4]])
    >>> torch.bucketize(v, boundaries, right=True)
    tensor([[2, 3, 5],
            [2, 3, 5]])
""",
)

add_docstr(
    torch.view_as_real_copy,
    r"""
Performs the same operation as :func:`torch.view_as_real`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.view_as_complex_copy,
    r"""
Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.as_strided_copy,
    r"""
Performs the same operation as :func:`torch.as_strided`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.diagonal_copy,
    r"""
Performs the same operation as :func:`torch.diagonal`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.expand_copy,
    r"""
Performs the same operation as :func:`torch.expand`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.permute_copy,
    r"""
Performs the same operation as :func:`torch.permute`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.select_copy,
    r"""
Performs the same operation as :func:`torch.select`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.detach_copy,
    r"""
Performs the same operation as :func:`torch.detach`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.slice_copy,
    r"""
Performs the same operation as :func:`torch.slice`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.split_copy,
    r"""
Performs the same operation as :func:`torch.split`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.split_with_sizes_copy,
    r"""
Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.squeeze_copy,
    r"""
Performs the same operation as :func:`torch.squeeze`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.t_copy,
    r"""
Performs the same operation as :func:`torch.t`, but all output tensors
are freshly created instead of aliasing the input.
""",
)

add_docstr(
    torch.transpose_copy,
    r"""
Performs the same operation as :func:`torch.transpose`, but all output tensors
are freshly created instead of aliasing the input.
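
Example::

    >>> # Illustrative sketch (not from the upstream docs): unlike torch.transpose,
    >>> # the result does not share storage with the input.
    >>> x = torch.eye(2)
    >>> y = torch.transpose_copy(x, 0, 1)
    >>> y.data_ptr() == x.data_ptr()
    False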
  10989. """,
  10990. )
  10991. add_docstr(
  10992. torch.unsqueeze_copy,
  10993. r"""
  10994. Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
  10995. are freshly created instead of aliasing the input.
  10996. """,
  10997. )
  10998. add_docstr(
  10999. torch.indices_copy,
  11000. r"""
  11001. Performs the same operation as :func:`torch.indices`, but all output tensors
  11002. are freshly created instead of aliasing the input.
  11003. """,
  11004. )
  11005. add_docstr(
  11006. torch.values_copy,
  11007. r"""
  11008. Performs the same operation as :func:`torch.values`, but all output tensors
  11009. are freshly created instead of aliasing the input.
  11010. """,
  11011. )
  11012. add_docstr(
  11013. torch.crow_indices_copy,
  11014. r"""
  11015. Performs the same operation as :func:`torch.crow_indices`, but all output tensors
  11016. are freshly created instead of aliasing the input.
  11017. """,
  11018. )
  11019. add_docstr(
  11020. torch.col_indices_copy,
  11021. r"""
  11022. Performs the same operation as :func:`torch.col_indices`, but all output tensors
  11023. are freshly created instead of aliasing the input.
  11024. """,
  11025. )
  11026. add_docstr(
  11027. torch.unbind_copy,
  11028. r"""
  11029. Performs the same operation as :func:`torch.unbind`, but all output tensors
  11030. are freshly created instead of aliasing the input.
  11031. """,
  11032. )
  11033. add_docstr(
  11034. torch.view_copy,
  11035. r"""
  11036. Performs the same operation as :func:`torch.view`, but all output tensors
  11037. are freshly created instead of aliasing the input.
  11038. """,
  11039. )
  11040. add_docstr(
  11041. torch.unfold_copy,
  11042. r"""
  11043. Performs the same operation as :func:`torch.unfold`, but all output tensors
  11044. are freshly created instead of aliasing the input.
  11045. """,
  11046. )
  11047. add_docstr(
  11048. torch.alias_copy,
  11049. r"""
  11050. Performs the same operation as :func:`torch.alias`, but all output tensors
  11051. are freshly created instead of aliasing the input.
  11052. """,
  11053. )
for unary_base_func_name in (
    "exp",
    "sqrt",
    "abs",
    "acos",
    "asin",
    "atan",
    "ceil",
    "cos",
    "cosh",
    "erf",
    "erfc",
    "expm1",
    "floor",
    "log",
    "log10",
    "log1p",
    "log2",
    "neg",
    "tan",
    "tanh",
    "sin",
    "sinh",
    "round",
    "lgamma",
    "frac",
    "reciprocal",
    "sigmoid",
    "trunc",
    "zero",
):
    unary_foreach_func_name = f"_foreach_{unary_base_func_name}"
    if hasattr(torch, unary_foreach_func_name):
        add_docstr(
            getattr(torch, unary_foreach_func_name),
            r"""
{}(self: List[Tensor]) -> List[Tensor]

Apply :func:`torch.{}` to each Tensor of the input list.
""".format(
                unary_foreach_func_name, unary_base_func_name
            ),
        )
    unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_"
    if hasattr(torch, unary_inplace_foreach_func_name):
        add_docstr(
            getattr(torch, unary_inplace_foreach_func_name),
            r"""
{}(self: List[Tensor]) -> None

Apply :func:`torch.{}` to each Tensor of the input list.
""".format(
                unary_inplace_foreach_func_name, unary_base_func_name
            ),
        )
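

# Illustrative sketch (not part of the documented API): a hypothetical,
# never-called helper showing the batched unary semantics documented by the
# loop above, assuming torch._foreach_exp is available at runtime (the loop
# guards each op with hasattr before attaching a docstring).
def _demo_foreach_exp():
    tensors = [torch.zeros(2), torch.ones(2)]
    # A single foreach call applies torch.exp to every tensor in the list.
    results = torch._foreach_exp(tensors)
    assert torch.allclose(results[0], torch.ones(2))
    assert torch.allclose(results[1], torch.full((2,), 2.7183), atol=1e-3)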