- # -*- coding: utf-8 -*-
- """Adds docstrings to functions defined in the torch._C"""
- import re
- import torch._C
- from torch._C import _add_docstr as add_docstr
- def parse_kwargs(desc):
- """Maps a description of args to a dictionary of {argname: description}.
- Input:
- (' weight (Tensor): a weight tensor\n' +
- ' Some optional description')
- Output: {
- 'weight': \
- 'weight (Tensor): a weight tensor\n Some optional description'
- }
- """
- # Split on a newline followed by exactly four whitespace characters,
- # i.e. the indentation level at which a new argument name begins
- regx = re.compile(r"\n\s{4}(?!\s)")
- kwargs = [section.strip() for section in regx.split(desc)]
- kwargs = [section for section in kwargs if len(section) > 0]
- return {section.split(" ")[0]: section for section in kwargs}
- def merge_dicts(*dicts):
- return {x: d[x] for d in dicts for x in d}
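- # A small illustrative sketch (not executed at import time, shown here only
- # to clarify the helpers above): parse_kwargs turns an indented argument
- # block into a {name: description} mapping, and merge_dicts overlays several
- # such mappings left to right, with later dicts winning on key collisions:
- #
- #     parse_kwargs("    x (Tensor): a tensor\n    y (int): a count")
- #     # -> {'x': 'x (Tensor): a tensor', 'y': 'y (int): a count'}
- #
- #     merge_dicts({'a': 1}, {'a': 2, 'b': 3})
- #     # -> {'a': 2, 'b': 3}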
- common_args = parse_kwargs(
- """
- input (Tensor): the input tensor.
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned tensor. Default: ``torch.preserve_format``.
- """
- )
- reduceops_common_args = merge_dicts(
- common_args,
- parse_kwargs(
- """
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- """
- ),
- )
- multi_dim_common = merge_dicts(
- reduceops_common_args,
- parse_kwargs(
- """
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- """
- ),
- {
- "keepdim_details": """
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
- """
- },
- {
- "opt_dim": """
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
- """
- },
- )
- single_dim_common = merge_dicts(
- reduceops_common_args,
- parse_kwargs(
- """
- dim (int): the dimension to reduce.
- """
- ),
- {
- "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensor having 1 fewer dimension than :attr:`input`."""
- },
- )
- factory_common_args = merge_dicts(
- common_args,
- parse_kwargs(
- """
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.contiguous_format``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
- """
- ),
- {
- "sparse_factory_device_note": """\
- .. note::
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor."""
- },
- )
- factory_like_common_args = parse_kwargs(
- """
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- )
- factory_data_common_args = parse_kwargs(
- """
- data (array_like): Initial data for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, infers data type from :attr:`data`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- """
- )
- tf32_notes = {
- "tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
- }
- rocm_fp16_notes = {
- "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \
- :ref:`different precision<fp16_on_mi200>` for backward."""
- }
- reproducibility_notes = {
- "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
- a CUDA device. See :doc:`/notes/randomness` for more information.""",
- "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
- a CUDA device. See :doc:`/notes/randomness` for more information.""",
- "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
- and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
- undesirable, you can try to make the operation deterministic (potentially at \
- a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
- See :doc:`/notes/randomness` for more information.""",
- }
- sparse_support_notes = {
- "sparse_beta_warning": """
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.""",
- }
- add_docstr(
- torch.abs,
- r"""
- abs(input, *, out=None) -> Tensor
- Computes the absolute value of each element in :attr:`input`.
- .. math::
- \text{out}_{i} = |\text{input}_{i}|
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> torch.abs(torch.tensor([-1, -2, 3]))
- tensor([ 1, 2, 3])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.absolute,
- r"""
- absolute(input, *, out=None) -> Tensor
- Alias for :func:`torch.abs`
- """,
- )
- add_docstr(
- torch.acos,
- r"""
- acos(input, *, out=None) -> Tensor
- Computes the inverse cosine of each element in :attr:`input`.
- .. math::
- \text{out}_{i} = \cos^{-1}(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
- >>> torch.acos(a)
- tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arccos,
- r"""
- arccos(input, *, out=None) -> Tensor
- Alias for :func:`torch.acos`.
- """,
- )
- add_docstr(
- torch.acosh,
- r"""
- acosh(input, *, out=None) -> Tensor
- Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \cosh^{-1}(\text{input}_{i})
- Note:
- The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
- will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
- """
- + r"""
- Args:
- {input}
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.randn(4).uniform_(1, 2)
- >>> a
- tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
- >>> torch.acosh(a)
- tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arccosh,
- r"""
- arccosh(input, *, out=None) -> Tensor
- Alias for :func:`torch.acosh`.
- """,
- )
- add_docstr(
- torch.index_add,
- r"""
- index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
- See :meth:`~Tensor.index_add_` for function description.
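- A minimal illustration, mirroring the example for the in-place variant:
- Example::
- >>> x = torch.ones(5, 3)
- >>> t = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
- >>> index = torch.tensor([0, 4, 2])
- >>> torch.index_add(x, 0, index, t)
- tensor([[ 2., 3., 4.],
- [ 1., 1., 1.],
- [ 8., 9., 10.],
- [ 1., 1., 1.],
- [ 5., 6., 7.]])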
- """,
- )
- add_docstr(
- torch.index_copy,
- r"""
- index_copy(input, dim, index, source, *, out=None) -> Tensor
- See :meth:`~Tensor.index_copy_` for function description.
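- A minimal illustration, mirroring the example for the in-place variant:
- Example::
- >>> x = torch.zeros(5, 3)
- >>> t = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
- >>> index = torch.tensor([0, 4, 2])
- >>> torch.index_copy(x, 0, index, t)
- tensor([[1., 2., 3.],
- [0., 0., 0.],
- [7., 8., 9.],
- [0., 0., 0.],
- [4., 5., 6.]])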
- """,
- )
- add_docstr(
- torch.index_reduce,
- r"""
- index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor
- See :meth:`~Tensor.index_reduce_` for function description.
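- A minimal sketch using the ``'prod'`` reduction (``include_self=True`` by
- default, so the existing values of :attr:`input` participate in the product):
- Example::
- >>> x = torch.ones(3, 2)
- >>> src = torch.tensor([[2., 2.], [3., 3.]])
- >>> index = torch.tensor([0, 0])
- >>> torch.index_reduce(x, 0, index, src, 'prod')
- tensor([[6., 6.],
- [1., 1.],
- [1., 1.]])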
- """,
- )
- add_docstr(
- torch.add,
- r"""
- add(input, other, *, alpha=1, out=None) -> Tensor
- Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
- .. math::
- \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i
- """
- + r"""
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
- Args:
- {input}
- other (Tensor or Number): the tensor or number to add to :attr:`input`.
- Keyword arguments:
- alpha (Number): the multiplier for :attr:`other`.
- {out}
- Examples::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
- >>> torch.add(a, 20)
- tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
- >>> b = torch.randn(4)
- >>> b
- tensor([-0.9732, -0.3497, 0.6245, 0.4022])
- >>> c = torch.randn(4, 1)
- >>> c
- tensor([[ 0.3743],
- [-1.7724],
- [-0.5811],
- [-0.8017]])
- >>> torch.add(b, c, alpha=10)
- tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
- [-18.6971, -18.0736, -17.0994, -17.3216],
- [ -6.7845, -6.1610, -5.1868, -5.4090],
- [ -8.9902, -8.3667, -7.3925, -7.6147]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.addbmm,
- r"""
- addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
- Performs a batch matrix-matrix product of matrices stored
- in :attr:`batch1` and :attr:`batch2`,
- with a reduced add step (all matrix multiplications get accumulated
- along the first dimension).
- :attr:`input` is added to the final result.
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
- same number of matrices.
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
- .. math::
- out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
- """
- + r"""
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
- must be real numbers, otherwise they should be integers.
- {tf32_note}
- {rocm_fp16_note}
- Args:
- input (Tensor): matrix to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
- {out}
- Example::
- >>> M = torch.randn(3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.addbmm(M, batch1, batch2)
- tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
- [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
- [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
- """.format(
- **common_args, **tf32_notes, **rocm_fp16_notes
- ),
- )
- add_docstr(
- torch.addcdiv,
- r"""
- addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
- Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
- multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
- .. warning::
- Integer division with addcdiv is no longer supported, and in a future
- release addcdiv will perform a true division of tensor1 and tensor2.
- The historic addcdiv behavior can be implemented as
- (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
- for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
- The future addcdiv behavior is just the latter implementation:
- (input + value * tensor1 / tensor2), for all dtypes.
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
- """
- + r"""
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the numerator tensor
- tensor2 (Tensor): the denominator tensor
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
- {out}
- Example::
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcdiv(t, t1, t2, value=0.1)
- tensor([[-0.2312, -3.6496, 0.1312],
- [-1.0428, 3.4292, -0.1030],
- [-0.5369, -0.9829, 0.0430]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.addcmul,
- r"""
- addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
- Performs the element-wise multiplication of :attr:`tensor1`
- by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
- and adds it to :attr:`input`.
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
- """
- + r"""
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the tensor to be multiplied
- tensor2 (Tensor): the tensor to be multiplied
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{{tensor1}} \times \text{{tensor2}}`
- {out}
- Example::
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcmul(t, t1, t2, value=0.1)
- tensor([[-0.8635, -0.6391, 1.6174],
- [-0.7617, -0.5879, 1.7388],
- [-0.8353, -0.6249, 1.6511]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.addmm,
- r"""
- addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
- Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
- The matrix :attr:`input` is added to the final result.
- If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
- :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
- """
- + r"""
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
- :attr:`input` is sparse the result will have the same layout and if :attr:`out`
- is provided it must have the same layout as :attr:`input`.
- {sparse_beta_warning}
- {tf32_note}
- {rocm_fp16_note}
- Args:
- input (Tensor): matrix to be added
- mat1 (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- {out}
- Example::
- >>> M = torch.randn(2, 3)
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.addmm(M, mat1, mat2)
- tensor([[-4.8716, 1.4671, -1.3746],
- [ 0.7573, -3.9555, -2.8681]])
- """.format(
- **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
- ),
- )
- add_docstr(
- torch.adjoint,
- r"""
- adjoint(Tensor) -> Tensor
- Returns a view of the tensor conjugated and with the last two dimensions transposed.
- ``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
- to ``x.transpose(-2, -1)`` for real tensors.
- Example::
- >>> x = torch.arange(4, dtype=torch.float)
- >>> A = torch.complex(x, x).reshape(2, 2)
- >>> A
- tensor([[0.+0.j, 1.+1.j],
- [2.+2.j, 3.+3.j]])
- >>> A.adjoint()
- tensor([[0.-0.j, 2.-2.j],
- [1.-1.j, 3.-3.j]])
- >>> (A.adjoint() == A.mH).all()
- tensor(True)
- """,
- )
- add_docstr(
- torch.sspaddmm,
- r"""
- sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
- Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
- :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
- Note: This function is equivalent to :func:`torch.addmm`, except
- :attr:`input` and :attr:`mat1` are sparse.
- Args:
- input (Tensor): a sparse matrix to be added
- mat1 (Tensor): a sparse matrix to be matrix multiplied
- mat2 (Tensor): a dense matrix to be matrix multiplied
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- {out}
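- An illustrative sketch follows; the sizes are arbitrary and, since the
- inputs are random, only the output shape is shown:
- Example::
- >>> input = torch.zeros(3, 5).to_sparse()
- >>> mat1 = torch.randn(3, 4).to_sparse()
- >>> mat2 = torch.randn(4, 5)
- >>> torch.sspaddmm(input, mat1, mat2).size()
- torch.Size([3, 5])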
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.smm,
- r"""
- smm(input, mat) -> Tensor
- Performs a matrix multiplication of the sparse matrix :attr:`input`
- with the dense matrix :attr:`mat`.
- Args:
- input (Tensor): a sparse matrix to be matrix multiplied
- mat (Tensor): a dense matrix to be matrix multiplied
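- An illustrative sketch; the inputs are random, so only the output shape is shown:
- Example::
- >>> input = torch.randn(3, 4).to_sparse()
- >>> mat = torch.randn(4, 5)
- >>> torch.smm(input, mat).size()
- torch.Size([3, 5])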
- """,
- )
- add_docstr(
- torch.addmv,
- r"""
- addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
- Performs a matrix-vector product of the matrix :attr:`mat` and
- the vector :attr:`vec`.
- The vector :attr:`input` is added to the final result.
- If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
- :attr:`out` will be a 1-D tensor of size `n`.
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
- :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
- """
- + r"""
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
- Args:
- input (Tensor): vector to be added
- mat (Tensor): matrix to be matrix multiplied
- vec (Tensor): vector to be matrix multiplied
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
- {out}
- Example::
- >>> M = torch.randn(2)
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.addmv(M, mat, vec)
- tensor([-0.3768, -5.5565])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.addr,
- r"""
- addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
- Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
- and adds it to the matrix :attr:`input`.
- Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
- outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
- :attr:`input` respectively.
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
- """
- + r"""
- If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
- of size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
- :math:`(n \times m)` and :attr:`out` will be a matrix of size
- :math:`(n \times m)`.
- Args:
- input (Tensor): matrix to be added
- vec1 (Tensor): the first vector of the outer product
- vec2 (Tensor): the second vector of the outer product
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
- {out}
- Example::
- >>> vec1 = torch.arange(1., 4.)
- >>> vec2 = torch.arange(1., 3.)
- >>> M = torch.zeros(3, 2)
- >>> torch.addr(M, vec1, vec2)
- tensor([[ 1., 2.],
- [ 2., 4.],
- [ 3., 6.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.allclose,
- r"""
- allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
- This function checks if :attr:`input` and :attr:`other` satisfy the condition:
- .. math::
- \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
- """
- + r"""
- elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
- `numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_.
- Args:
- input (Tensor): first tensor to compare
- other (Tensor): second tensor to compare
- atol (float, optional): absolute tolerance. Default: 1e-08
- rtol (float, optional): relative tolerance. Default: 1e-05
- equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
- Example::
- >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
- False
- >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
- True
- >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
- False
- >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
- True
- """,
- )
- add_docstr(
- torch.all,
- r"""
- all(input) -> Tensor
- Tests if all elements in :attr:`input` evaluate to `True`.
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
- Example::
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.all(a)
- tensor(False, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.all(a)
- tensor(False)
- .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
- {keepdim_details}
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- {out}
- Example::
- >>> a = torch.rand(4, 2).bool()
- >>> a
- tensor([[True, True],
- [True, False],
- [True, True],
- [True, True]], dtype=torch.bool)
- >>> torch.all(a, dim=1)
- tensor([ True, False, True, True], dtype=torch.bool)
- >>> torch.all(a, dim=0)
- tensor([ True, False], dtype=torch.bool)
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.any,
- r"""
- any(input) -> Tensor
- Tests if any element in :attr:`input` evaluates to `True`.
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
- Example::
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.any(a)
- tensor(True, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.any(a)
- tensor(True)
- .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if any element in the row evaluates to `True` and `False` otherwise.
- {keepdim_details}
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4, 2) < 0
- >>> a
- tensor([[ True, True],
- [False, True],
- [ True, True],
- [False, False]])
- >>> torch.any(a, 1)
- tensor([ True, True, True, False])
- >>> torch.any(a, 0)
- tensor([True, True])
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.angle,
- r"""
- angle(input, *, out=None) -> Tensor
- Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
- .. math::
- \text{out}_{i} = angle(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- .. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
- zero for non-negative real numbers, and propagates NaNs. Previously
- the function would return zero for all real numbers and not propagate
- floating-point NaNs.
- Example::
- >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
- tensor([ 135., 135., -45.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.as_strided,
- r"""
- as_strided(input, size, stride, storage_offset=None) -> Tensor
- Create a view of an existing `torch.Tensor` :attr:`input` with specified
- :attr:`size`, :attr:`stride` and :attr:`storage_offset`.
- .. warning::
- Prefer using other view functions, like :meth:`torch.Tensor.expand`,
- to setting a view's strides manually with `as_strided`, as this
- function's behavior depends on the implementation of a tensor's storage.
- The constructed view of the storage must only refer to elements within
- the storage or a runtime error will be thrown, and if the view is
- "overlapped" (with multiple indices referring to the same element in
- memory) its behavior is undefined.
- Args:
- {input}
- size (tuple of ints): the shape of the output tensor
- stride (tuple of ints): the stride of the output tensor
- storage_offset (int, optional): the offset in the underlying storage of the output tensor.
- If ``None``, the storage_offset of the output tensor will match the input tensor.
- Example::
- >>> x = torch.randn(3, 3)
- >>> x
- tensor([[ 0.9039, 0.6291, 1.0795],
- [ 0.1586, 2.1939, -0.4900],
- [-0.1909, -0.7503, 1.9355]])
- >>> t = torch.as_strided(x, (2, 2), (1, 2))
- >>> t
- tensor([[0.9039, 1.0795],
- [0.6291, 0.1586]])
- >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
- >>> t
- tensor([[0.6291, 0.1586],
- [1.0795, 2.1939]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.as_tensor,
- r"""
- as_tensor(data, dtype=None, device=None) -> Tensor
- Converts :attr:`data` into a tensor, sharing data and preserving autograd
- history if possible.
- If :attr:`data` is already a tensor with the requested dtype and device
- then :attr:`data` itself is returned, but if :attr:`data` is a
- tensor with a different dtype or device then it's copied as if using
- `data.to(dtype=dtype, device=device)`.
- If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
- tensor is constructed using :func:`torch.from_numpy`.
- .. seealso::
- :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).
- Args:
- {data}
- {dtype}
- device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
- then the device of data is used. If None and data is not a tensor then
- the result tensor is constructed on the CPU.
- Example::
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.as_tensor(a)
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([-1, 2, 3])
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.as_tensor(a, device=torch.device('cuda'))
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([1, 2, 3])
- """.format(
- **factory_data_common_args
- ),
- )
- add_docstr(
- torch.asin,
- r"""
- asin(input, *, out=None) -> Tensor
- Returns a new tensor with the arcsine of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \sin^{-1}(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.5962, 1.4985, -0.4396, 1.4525])
- >>> torch.asin(a)
- tensor([-0.6387, nan, -0.4552, nan])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arcsin,
- r"""
- arcsin(input, *, out=None) -> Tensor
- Alias for :func:`torch.asin`.
- """,
- )
- add_docstr(
- torch.asinh,
- r"""
- asinh(input, *, out=None) -> Tensor
- Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \sinh^{-1}(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
- >>> torch.asinh(a)
- tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arcsinh,
- r"""
- arcsinh(input, *, out=None) -> Tensor
- Alias for :func:`torch.asinh`.
- """,
- )
- add_docstr(
- torch.atan,
- r"""
- atan(input, *, out=None) -> Tensor
- Returns a new tensor with the arctangent of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \tan^{-1}(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
- >>> torch.atan(a)
- tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arctan,
- r"""
- arctan(input, *, out=None) -> Tensor
- Alias for :func:`torch.atan`.
- """,
- )
- add_docstr(
- torch.atan2,
- r"""
- atan2(input, other, *, out=None) -> Tensor
- Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
- with consideration of the quadrant. Returns a new tensor with the signed angles
- in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
- and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
- parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
- parameter, is the y-coordinate.)
- The shapes of ``input`` and ``other`` must be
- :ref:`broadcastable <broadcasting-semantics>`.
- Args:
- input (Tensor): the first input tensor
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
- >>> torch.atan2(a, torch.randn(4))
- tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arctan2,
- r"""
- arctan2(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.atan2`.
- """,
- )
- add_docstr(
- torch.atanh,
- r"""
- atanh(input, *, out=None) -> Tensor
- Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
- Note:
- The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
- will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
- mapped to `+/-INF` respectively.
- .. math::
- \text{out}_{i} = \tanh^{-1}(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.randn(4).uniform_(-1, 1)
- >>> a
- tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
- >>> torch.atanh(a)
- tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.arctanh,
- r"""
- arctanh(input, *, out=None) -> Tensor
- Alias for :func:`torch.atanh`.
- """,
- )
- add_docstr(
- torch.asarray,
- r"""
- asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor
- Converts :attr:`obj` to a tensor.
- :attr:`obj` can be one of:
- 1. a tensor
- 2. a NumPy array or a NumPy scalar
- 3. a DLPack capsule
- 4. an object that implements Python's buffer protocol
- 5. a scalar
- 6. a sequence of scalars
- When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
- by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
- same device, and share memory with it. These properties can be controlled with the
- :attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
- If the returned tensor is of a different datatype, on a different device, or a copy is
- requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
- is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
- also a tensor with an autograd history then the returned tensor will have the same history.
- When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
- buffer protocol then the buffer is interpreted as an array of bytes grouped according to
- the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
- passed then the default floating point datatype is used, instead.) The returned tensor
- will have the specified datatype (or default floating point datatype if none is specified)
- and, by default, be on the CPU device and share memory with the buffer.
- When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
- the CPU that does not share its memory (i.e. ``copy=True``). By default the datatype will
- be the PyTorch datatype corresponding to the NumPy scalar's datatype.
- When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
- returned tensor will, by default, infer its datatype from the scalar values, be on the
- CPU device, and not share its memory.
- .. seealso::
- :func:`torch.tensor` creates a tensor that always copies the data from the input object.
- :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
- :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
- implement the buffer protocol.
- :func:`torch.from_dlpack` creates a tensor that always shares memory from
- DLPack capsules.
- Args:
- obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
- buffer protocol, scalar, or sequence of scalars.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
- Default: ``None``, which causes the datatype of the returned tensor to be
- inferred from :attr:`obj`.
- copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
- Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
- whenever possible. If ``True`` then the returned tensor does not share its memory.
- If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
- error is thrown if it cannot.
- device (:class:`torch.device`, optional): the device of the returned tensor.
- Default: ``None``, which causes the device of :attr:`obj` to be used.
- requires_grad (bool, optional): whether the returned tensor requires grad.
- Default: ``False``, which causes the returned tensor not to require a gradient.
- If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
- is also a tensor with an autograd history then the returned tensor will have
- the same history.
- Example::
- >>> a = torch.tensor([1, 2, 3])
- >>> # Shares memory with tensor 'a'
- >>> b = torch.asarray(a)
- >>> a.data_ptr() == b.data_ptr()
- True
- >>> # Forces memory copy
- >>> c = torch.asarray(a, copy=True)
- >>> a.data_ptr() == c.data_ptr()
- False
- >>> a = torch.tensor([1, 2, 3], requires_grad=True).float()
- >>> b = a + 2
- >>> b
- tensor([1., 2., 3.], grad_fn=<AddBackward0>)
- >>> # Shares memory with tensor 'b', with no grad
- >>> c = torch.asarray(b)
- >>> c
- tensor([1., 2., 3.])
- >>> # Shares memory with tensor 'b', retaining autograd history
- >>> d = torch.asarray(b, requires_grad=True)
- >>> d
- tensor([1., 2., 3.], grad_fn=<AddBackward0>)
- >>> array = numpy.array([1, 2, 3])
- >>> # Shares memory with array 'array'
- >>> t1 = torch.asarray(array)
- >>> array.__array_interface__['data'][0] == t1.data_ptr()
- True
- >>> # Copies memory due to dtype mismatch
- >>> t2 = torch.asarray(array, dtype=torch.float32)
- >>> array.__array_interface__['data'][0] == t2.data_ptr()
- False
- >>> scalar = numpy.float64(0.5)
- >>> torch.asarray(scalar)
- tensor(0.5000, dtype=torch.float64)
- """,
- )
- add_docstr(
- torch.baddbmm,
- r"""
- baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
- Performs a batch matrix-matrix product of matrices in :attr:`batch1`
- and :attr:`batch2`.
- :attr:`input` is added to the final result.
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
- number of matrices.
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a
- :math:`(b \times n \times p)` tensor and :attr:`out` will be a
- :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
- same as the scaling factors used in :meth:`torch.addbmm`.
- .. math::
- \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
- """
- + r"""
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
- {tf32_note}
- {rocm_fp16_note}
- Args:
- input (Tensor): the tensor to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
- {out}
- Example::
- >>> M = torch.randn(10, 3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.baddbmm(M, batch1, batch2).size()
- torch.Size([10, 3, 5])
- """.format(
- **common_args, **tf32_notes, **rocm_fp16_notes
- ),
- )
- add_docstr(
- torch.bernoulli,
- r"""
- bernoulli(input, *, generator=None, out=None) -> Tensor
- Draws binary random numbers (0 or 1) from a Bernoulli distribution.
- The :attr:`input` tensor should be a tensor containing probabilities
- to be used for drawing the binary random number.
- Hence, all values in :attr:`input` have to be in the range:
- :math:`0 \leq \text{input}_i \leq 1`.
- The :math:`\text{i}^{th}` element of the output tensor will draw a
- value :math:`1` according to the :math:`\text{i}^{th}` probability value given
- in :attr:`input`.
- .. math::
- \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
- """
- + r"""
- The returned :attr:`out` tensor only has values 0 or 1 and is of the same
- shape as :attr:`input`.
- :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
- point ``dtype``.
- Args:
- input (Tensor): the input tensor of probability values for the Bernoulli distribution
- Keyword args:
- {generator}
- {out}
- Example::
- >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
- >>> a
- tensor([[ 0.1737, 0.0950, 0.3609],
- [ 0.7148, 0.0289, 0.2676],
- [ 0.9456, 0.8937, 0.7202]])
- >>> torch.bernoulli(a)
- tensor([[ 1., 0., 0.],
- [ 0., 0., 0.],
- [ 1., 1., 1.]])
- >>> a = torch.ones(3, 3) # probability of drawing "1" is 1
- >>> torch.bernoulli(a)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.],
- [ 1., 1., 1.]])
- >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
- >>> torch.bernoulli(a)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.],
- [ 0., 0., 0.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.bincount,
- r"""
- bincount(input, weights=None, minlength=0) -> Tensor
- Count the frequency of each value in an array of non-negative ints.
- The number of bins (each of width 1) is one larger than the largest value in
- :attr:`input` unless :attr:`input` is empty, in which case the result is a
- tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
- :attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size
- :attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
- ``out[n] += weights[i]`` if :attr:`weights` is specified else
- ``out[n] += 1``.
- Note:
- {backward_reproducibility_note}
- Arguments:
- input (Tensor): 1-d int tensor
- weights (Tensor): optional, weight for each value in the input tensor.
- Should be of same size as input tensor.
- minlength (int): optional, minimum number of bins. Should be non-negative.
- Returns:
- output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
- :attr:`input` is non-empty, else ``Size(0)``
- Example::
- >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
- >>> weights = torch.linspace(0, 1, steps=5)
- >>> input, weights
- (tensor([4, 3, 6, 3, 4]),
- tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
- >>> torch.bincount(input)
- tensor([0, 0, 0, 2, 2, 0, 1])
- >>> input.bincount(weights)
- tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
- """.format(
- **reproducibility_notes
- ),
- )
- add_docstr(
- torch.bitwise_not,
- r"""
- bitwise_not(input, *, out=None) -> Tensor
- Computes the bitwise NOT of the given input tensor. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical NOT.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
- tensor([ 0, 1, -4], dtype=torch.int8)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.bmm,
- r"""
- bmm(input, mat2, *, out=None) -> Tensor
- Performs a batch matrix-matrix product of matrices stored in :attr:`input`
- and :attr:`mat2`.
- :attr:`input` and :attr:`mat2` must be 3-D tensors each containing
- the same number of matrices.
- If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
- :math:`(b \times m \times p)` tensor, :attr:`out` will be a
- :math:`(b \times n \times p)` tensor.
- .. math::
- \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
- """
- + r"""
- {tf32_note}
- {rocm_fp16_note}
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
- For broadcasting matrix products, see :func:`torch.matmul`.
- Args:
- input (Tensor): the first batch of matrices to be multiplied
- mat2 (Tensor): the second batch of matrices to be multiplied
- Keyword Args:
- {out}
- Example::
- >>> input = torch.randn(10, 3, 4)
- >>> mat2 = torch.randn(10, 4, 5)
- >>> res = torch.bmm(input, mat2)
- >>> res.size()
- torch.Size([10, 3, 5])
- """.format(
- **common_args, **tf32_notes, **rocm_fp16_notes
- ),
- )
- add_docstr(
- torch.bitwise_and,
- r"""
- bitwise_and(input, other, *, out=None) -> Tensor
- Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical AND.
- Args:
- input: the first input tensor
- other: the second input tensor
- Keyword args:
- {out}
- Example::
- >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([1, 0, 3], dtype=torch.int8)
- >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ False, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.bitwise_or,
- r"""
- bitwise_or(input, other, *, out=None) -> Tensor
- Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical OR.
- Args:
- input: the first input tensor
- other: the second input tensor
- Keyword args:
- {out}
- Example::
- >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -2, 3], dtype=torch.int8)
- >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.bitwise_xor,
- r"""
- bitwise_xor(input, other, *, out=None) -> Tensor
- Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical XOR.
- Args:
- input: the first input tensor
- other: the second input tensor
- Keyword args:
- {out}
- Example::
- >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 0], dtype=torch.int8)
- >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, False, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.bitwise_left_shift,
- r"""
- bitwise_left_shift(input, other, *, out=None) -> Tensor
- Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
- The operation applied is:
- .. math::
- \text{{out}}_i = \text{{input}}_i << \text{{other}}_i
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 24], dtype=torch.int8)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.bitwise_right_shift,
- r"""
- bitwise_right_shift(input, other, *, out=None) -> Tensor
- Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
- The operation applied is:
- .. math::
- \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -7, 3], dtype=torch.int8)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.broadcast_to,
- r"""
- broadcast_to(input, shape) -> Tensor
- Broadcasts :attr:`input` to the shape :attr:`shape`.
- Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
- Args:
- {input}
- shape (list, tuple, or :class:`torch.Size`): the new shape.
- Example::
- >>> x = torch.tensor([1, 2, 3])
- >>> torch.broadcast_to(x, (3, 3))
- tensor([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.stack,
- r"""
- stack(tensors, dim=0, *, out=None) -> Tensor
- Concatenates a sequence of tensors along a new dimension.
- All tensors need to be of the same size.
- Arguments:
- tensors (sequence of Tensors): sequence of tensors to concatenate
- dim (int): dimension to insert. Has to be between 0 and the number
- of dimensions of concatenated tensors (inclusive)
- Keyword args:
- {out}
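- A shape-level sketch (values from :func:`torch.randn` are random, so only
- sizes are shown):
- Example::
- >>> x = torch.randn(2, 3)
- >>> torch.stack((x, x, x), dim=0).size()
- torch.Size([3, 2, 3])
- >>> torch.stack((x, x, x), dim=1).size()
- torch.Size([2, 3, 3])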
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.hstack,
- r"""
- hstack(tensors, *, out=None) -> Tensor
- Stack tensors in sequence horizontally (column wise).
- This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.hstack((a,b))
- tensor([1, 2, 3, 4, 5, 6])
- >>> a = torch.tensor([[1],[2],[3]])
- >>> b = torch.tensor([[4],[5],[6]])
- >>> torch.hstack((a,b))
- tensor([[1, 4],
- [2, 5],
- [3, 6]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.vstack,
- r"""
- vstack(tensors, *, out=None) -> Tensor
- Stack tensors in sequence vertically (row wise).
- This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.vstack((a,b))
- tensor([[1, 2, 3],
- [4, 5, 6]])
- >>> a = torch.tensor([[1],[2],[3]])
- >>> b = torch.tensor([[4],[5],[6]])
- >>> torch.vstack((a,b))
- tensor([[1],
- [2],
- [3],
- [4],
- [5],
- [6]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.dstack,
- r"""
- dstack(tensors, *, out=None) -> Tensor
- Stack tensors in sequence depthwise (along third axis).
- This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.dstack((a,b))
- tensor([[[1, 4],
- [2, 5],
- [3, 6]]])
- >>> a = torch.tensor([[1],[2],[3]])
- >>> b = torch.tensor([[4],[5],[6]])
- >>> torch.dstack((a,b))
- tensor([[[1, 4]],
- [[2, 5]],
- [[3, 6]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.tensor_split,
- r"""
- tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
- Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
- along dimension :attr:`dim` according to the indices or number of sections specified
- by :attr:`indices_or_sections`. This function is based on NumPy's
- :func:`numpy.array_split`.
- Args:
- input (Tensor): the tensor to split
- indices_or_sections (Tensor, int or list or tuple of ints):
- If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
- with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
- If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
- section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
- is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
- sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
- have size :code:`int(input.size(dim) / n)`.
- If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
- tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
- in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
- would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
- If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
- long tensor on the CPU.
- dim (int, optional): dimension along which to split the tensor. Default: ``0``
- Example::
- >>> x = torch.arange(8)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
- >>> x = torch.arange(7)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
- >>> torch.tensor_split(x, (1, 6))
- (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
- >>> x = torch.arange(14).reshape(2, 7)
- >>> x
- tensor([[ 0, 1, 2, 3, 4, 5, 6],
- [ 7, 8, 9, 10, 11, 12, 13]])
- >>> torch.tensor_split(x, 3, dim=1)
- (tensor([[0, 1, 2],
- [7, 8, 9]]),
- tensor([[ 3, 4],
- [10, 11]]),
- tensor([[ 5, 6],
- [12, 13]]))
- >>> torch.tensor_split(x, (1, 6), dim=1)
- (tensor([[0],
- [7]]),
- tensor([[ 1, 2, 3, 4, 5],
- [ 8, 9, 10, 11, 12]]),
- tensor([[ 6],
- [13]]))
- """,
- )
- add_docstr(
- torch.chunk,
- r"""
- chunk(input, chunks, dim=0) -> List of Tensors
- Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
- the input tensor.
- .. note::
- This function may return fewer than the specified number of chunks!
- .. seealso::
- :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks
- If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
- all returned chunks will be the same size.
- If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
- all returned chunks will be the same size, except the last one.
- If such division is not possible, this function may return fewer
- than the specified number of chunks.
- Arguments:
- input (Tensor): the tensor to split
- chunks (int): number of chunks to return
- dim (int): dimension along which to split the tensor
- Example::
- >>> torch.arange(11).chunk(6)
- (tensor([0, 1]),
- tensor([2, 3]),
- tensor([4, 5]),
- tensor([6, 7]),
- tensor([8, 9]),
- tensor([10]))
- >>> torch.arange(12).chunk(6)
- (tensor([0, 1]),
- tensor([2, 3]),
- tensor([4, 5]),
- tensor([6, 7]),
- tensor([8, 9]),
- tensor([10, 11]))
- >>> torch.arange(13).chunk(6)
- (tensor([0, 1, 2]),
- tensor([3, 4, 5]),
- tensor([6, 7, 8]),
- tensor([ 9, 10, 11]),
- tensor([12]))
- """,
- )
- add_docstr(
- torch.unsafe_chunk,
- r"""
- unsafe_chunk(input, chunks, dim=0) -> List of Tensors
- Works like :func:`torch.chunk` but without enforcing the autograd restrictions
- on inplace modification of the outputs.
- .. warning::
- This function is safe to use as long as only the input, or only the outputs
- are modified inplace after calling this function. It is the user's
- responsibility to ensure that is the case. If both the input and one or more
- of the outputs are modified inplace, gradients computed by autograd will be
- silently incorrect.
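- Example (a minimal sketch of the safe pattern described above, modifying
- only the outputs in place)::
- >>> x = torch.arange(4.)
- >>> a, b = torch.unsafe_chunk(x, 2)
- >>> a.add_(1) # only the outputs are modified in place, so this is safe
- tensor([1., 2.])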
- """,
- )
- add_docstr(
- torch.unsafe_split,
- r"""
- unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
- Works like :func:`torch.split` but without enforcing the autograd restrictions
- on inplace modification of the outputs.
- .. warning::
- This function is safe to use as long as only the input, or only the outputs
- are modified inplace after calling this function. It is the user's
- responsibility to ensure that is the case. If both the input and one or more
- of the outputs are modified inplace, gradients computed by autograd will be
- silently incorrect.
- """,
- )
- add_docstr(
- torch.hsplit,
- r"""
- hsplit(input, indices_or_sections) -> List of Tensors
- Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
- horizontally according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
- If :attr:`input` is one dimensional, this is equivalent to calling
- torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
- 0), and if :attr:`input` has two or more dimensions it's equivalent to calling
- torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
- except that if :attr:`indices_or_sections` is an integer it must evenly divide
- the split dimension or a runtime error will be thrown.
- This function is based on NumPy's :func:`numpy.hsplit`.
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
- Example::
- >>> t = torch.arange(16.0).reshape(4,4)
- >>> t
- tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [12., 13., 14., 15.]])
- >>> torch.hsplit(t, 2)
- (tensor([[ 0., 1.],
- [ 4., 5.],
- [ 8., 9.],
- [12., 13.]]),
- tensor([[ 2., 3.],
- [ 6., 7.],
- [10., 11.],
- [14., 15.]]))
- >>> torch.hsplit(t, [3, 6])
- (tensor([[ 0., 1., 2.],
- [ 4., 5., 6.],
- [ 8., 9., 10.],
- [12., 13., 14.]]),
- tensor([[ 3.],
- [ 7.],
- [11.],
- [15.]]),
- tensor([], size=(4, 0)))
- """,
- )
- add_docstr(
- torch.vsplit,
- r"""
- vsplit(input, indices_or_sections) -> List of Tensors
- Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
- vertically according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
- This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
- (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
- it must evenly divide the split dimension or a runtime error will be thrown.
- This function is based on NumPy's :func:`numpy.vsplit`.
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
- Example::
- >>> t = torch.arange(16.0).reshape(4,4)
- >>> t
- tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [12., 13., 14., 15.]])
- >>> torch.vsplit(t, 2)
- (tensor([[0., 1., 2., 3.],
- [4., 5., 6., 7.]]),
- tensor([[ 8., 9., 10., 11.],
- [12., 13., 14., 15.]]))
- >>> torch.vsplit(t, [3, 6])
- (tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.]]),
- tensor([[12., 13., 14., 15.]]),
- tensor([], size=(0, 4)))
- """,
- )
- add_docstr(
- torch.dsplit,
- r"""
- dsplit(input, indices_or_sections) -> List of Tensors
- Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
- depthwise according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
- This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
- (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
- it must evenly divide the split dimension or a runtime error will be thrown.
- This function is based on NumPy's :func:`numpy.dsplit`.
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
- Example::
- >>> t = torch.arange(16.0).reshape(2, 2, 4)
- >>> t
- tensor([[[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.]],
- [[ 8., 9., 10., 11.],
- [12., 13., 14., 15.]]])
- >>> torch.dsplit(t, 2)
- (tensor([[[ 0., 1.],
- [ 4., 5.]],
- [[ 8., 9.],
- [12., 13.]]]),
- tensor([[[ 2., 3.],
- [ 6., 7.]],
- [[10., 11.],
- [14., 15.]]]))
- >>> torch.dsplit(t, [3, 6])
- (tensor([[[ 0., 1., 2.],
- [ 4., 5., 6.]],
- [[ 8., 9., 10.],
- [12., 13., 14.]]]),
- tensor([[[ 3.],
- [ 7.]],
- [[11.],
- [15.]]]),
- tensor([], size=(2, 2, 0)))
- """,
- )
- add_docstr(
- torch.can_cast,
- r"""
- can_cast(from, to) -> bool
- Determines if a type conversion is allowed under PyTorch casting rules
- described in the type promotion :ref:`documentation <type-promotion-doc>`.
- Args:
- from (dtype): The original :class:`torch.dtype`.
- to (dtype): The target :class:`torch.dtype`.
- Example::
- >>> torch.can_cast(torch.double, torch.float)
- True
- >>> torch.can_cast(torch.float, torch.int)
- False
- """,
- )
- add_docstr(
- torch.corrcoef,
- r"""
- corrcoef(input) -> Tensor
- Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
- where rows are the variables and columns are the observations.
- .. note::
- The correlation coefficient matrix R is computed using the covariance matrix C as given by
- :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
- .. note::
- Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
- The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
- Args:
- input (Tensor): A 2D matrix containing multiple variables and observations, or a
- Scalar or 1D vector representing a single variable.
- Returns:
- (Tensor) The correlation coefficient matrix of the variables.
- .. seealso::
- :func:`torch.cov` covariance matrix.
- Example::
- >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
- >>> torch.corrcoef(x)
- tensor([[ 1., -1.],
- [-1., 1.]])
- >>> x = torch.randn(2, 4)
- >>> x
- tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
- [-0.5812, 0.1535, 0.2387, 0.2350]])
- >>> torch.corrcoef(x)
- tensor([[1.0000, 0.3582],
- [0.3582, 1.0000]])
- >>> torch.corrcoef(x[0])
- tensor(1.)
- """,
- )
- add_docstr(
- torch.cov,
- r"""
- cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
- Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
- the variables and columns are the observations.
- A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
- the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
- a single variable (Scalar or 1D) then its variance is returned.
- The unbiased sample covariance of the variables :math:`x` and :math:`y` is given by:
- .. math::
- \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{N~-~1}
- where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively.
- If :attr:`fweights` and/or :attr:`aweights` are provided, the unbiased weighted covariance
- is calculated, which is given by:
- .. math::
- \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\sum^{N}_{i = 1}w_i~-~1}
- where :math:`w` denotes :attr:`fweights` or :attr:`aweights` based on whichever is provided, or
- :math:`w = fweights \times aweights` if both are provided, and
- :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable.
- Args:
- input (Tensor): A 2D matrix containing multiple variables and observations, or a
- Scalar or 1D vector representing a single variable.
- Keyword Args:
- correction (int, optional): difference between the sample size and sample degrees of freedom.
- Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate,
- even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
- will return the simple average.
- fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
- times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
- Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
- aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
- These relative weights are typically large for observations considered “important” and smaller for
- observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
- Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
- Returns:
- (Tensor) The covariance matrix of the variables.
- .. seealso::
- :func:`torch.corrcoef` normalized covariance matrix.
- Example::
- >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
- >>> x
- tensor([[0, 1, 2],
- [2, 1, 0]])
- >>> torch.cov(x)
- tensor([[ 1., -1.],
- [-1., 1.]])
- >>> torch.cov(x, correction=0)
- tensor([[ 0.6667, -0.6667],
- [-0.6667, 0.6667]])
- >>> fw = torch.randint(1, 10, (3,))
- >>> fw
- tensor([1, 6, 9])
- >>> aw = torch.rand(3)
- >>> aw
- tensor([0.4282, 0.0255, 0.4144])
- >>> torch.cov(x, fweights=fw, aweights=aw)
- tensor([[ 0.4169, -0.4169],
- [-0.4169, 0.4169]])
- """,
- )
- add_docstr(
- torch.cat,
- r"""
- cat(tensors, dim=0, *, out=None) -> Tensor
- Concatenates the given sequence of tensors in :attr:`tensors` along the given dimension.
- All tensors must either have the same shape (except in the concatenating
- dimension) or be empty.
- :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
- and :func:`torch.chunk`.
- :func:`torch.cat` is best understood via examples.
- Args:
- tensors (sequence of Tensors): any Python sequence of tensors of the same type.
- Non-empty tensors provided must have the same shape, except in the
- cat dimension.
- dim (int, optional): the dimension over which the tensors are concatenated
- Keyword args:
- {out}
- Example::
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497]])
- >>> torch.cat((x, x, x), 0)
- tensor([[ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497],
- [ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497],
- [ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497]])
- >>> torch.cat((x, x, x), 1)
- tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
- -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
- -0.5790, 0.1497]])
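- A round-trip sketch of the inverse relationship with :func:`torch.chunk`
- noted above (assuming the chunks cover the whole tensor)::
- >>> t = torch.arange(6)
- >>> torch.cat(torch.chunk(t, 3)).equal(t)
- True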
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.concat,
- r"""
- concat(tensors, dim=0, *, out=None) -> Tensor
- Alias of :func:`torch.cat`.
- """,
- )
- add_docstr(
- torch.concatenate,
- r"""
- concatenate(tensors, axis=0, out=None) -> Tensor
- Alias of :func:`torch.cat`.
- """,
- )
- add_docstr(
- torch.ceil,
- r"""
- ceil(input, *, out=None) -> Tensor
- Returns a new tensor with the ceil of the elements of :attr:`input`,
- the smallest integer greater than or equal to each element.
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
- .. math::
- \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.6341, -1.4208, -1.0900, 0.5826])
- >>> torch.ceil(a)
- tensor([-0., -1., -1., 1.])
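- For integer inputs, a copy with unchanged values is returned, per the
- convention noted above (a small illustrative check)::
- >>> torch.ceil(torch.tensor([1, 2, 3]))
- tensor([1, 2, 3])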
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.real,
- r"""
- real(input) -> Tensor
- Returns a new tensor containing the real values of the :attr:`input` tensor.
- The returned tensor and :attr:`input` share the same underlying storage.
- Args:
- {input}
- Example::
- >>> x = torch.randn(4, dtype=torch.cfloat)
- >>> x
- tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
- >>> x.real
- tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.imag,
- r"""
- imag(input) -> Tensor
- Returns a new tensor containing the imaginary values of the :attr:`input` tensor.
- The returned tensor and :attr:`input` share the same underlying storage.
- .. warning::
- :func:`imag` is only supported for tensors with complex dtypes.
- Args:
- {input}
- Example::
- >>> x = torch.randn(4, dtype=torch.cfloat)
- >>> x
- tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
- >>> x.imag
- tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.view_as_real,
- r"""
- view_as_real(input) -> Tensor
- Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
- :attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
- real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
- represents the real and imaginary components of complex numbers.
- .. warning::
- :func:`view_as_real` is only supported for tensors with complex dtypes.
- Args:
- {input}
- Example::
- >>> x = torch.randn(4, dtype=torch.cfloat)
- >>> x
- tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
- >>> torch.view_as_real(x)
- tensor([[ 0.4737, -0.3839],
- [-0.2098, -0.6699],
- [ 0.3470, -0.9451],
- [-0.5174, -1.3136]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.view_as_complex,
- r"""
- view_as_complex(input) -> Tensor
- Returns a view of :attr:`input` as a complex tensor. For an input complex
- tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
- new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
- dimension of the input tensor is expected to represent the real and imaginary
- components of complex numbers.
- .. warning::
- :func:`view_as_complex` is only supported for tensors with
- :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
- expected to have the last dimension of :attr:`size` 2. In addition, the
- tensor must have a `stride` of 1 for its last dimension. The strides of all
- other dimensions must be even numbers.
- Args:
- {input}
- Example::
- >>> x = torch.randn(4, 2)
- >>> x
- tensor([[ 1.6116, -0.5772],
- [-1.4606, -0.9120],
- [ 0.0786, -1.7497],
- [-0.6561, -1.6623]])
- >>> torch.view_as_complex(x)
- tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.reciprocal,
- r"""
- reciprocal(input, *, out=None) -> Tensor
- Returns a new tensor with the reciprocal of the elements of :attr:`input`
- .. math::
- \text{out}_{i} = \frac{1}{\text{input}_{i}}
- .. note::
- Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
- inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
- the default scalar type.
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.4595, -2.1219, -1.4314, 0.7298])
- >>> torch.reciprocal(a)
- tensor([-2.1763, -0.4713, -0.6986, 1.3702])
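- The promotion of integral inputs noted above can be seen directly (a small
- illustrative check, assuming the default scalar type is ``torch.float32``)::
- >>> torch.reciprocal(torch.tensor([2]))
- tensor([0.5000])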
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.cholesky,
- r"""
- cholesky(input, upper=False, *, out=None) -> Tensor
- Computes the Cholesky decomposition of a symmetric positive-definite
- matrix :math:`A` or for batches of symmetric positive-definite matrices.
- If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
- the decomposition has the form:
- .. math::
- A = U^TU
- If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
- the decomposition has the form:
- .. math::
- A = LL^T
- If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
- matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
- of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
- tensor will be composed of lower-triangular Cholesky factors of each of the individual
- matrices.
- .. warning::
- :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
- and will be removed in a future PyTorch release.
- ``L = torch.cholesky(A)`` should be replaced with
- .. code:: python
- L = torch.linalg.cholesky(A)
- ``U = torch.cholesky(A, upper=True)`` should be replaced with
- .. code:: python
- U = torch.linalg.cholesky(A).mH
- This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
- Args:
- input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
- batch dimensions consisting of symmetric positive-definite matrices.
- upper (bool, optional): flag that indicates whether to return an
- upper or lower triangular matrix. Default: ``False``
- Keyword args:
- out (Tensor, optional): the output matrix
- Example::
- >>> a = torch.randn(3, 3)
- >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
- >>> l = torch.cholesky(a)
- >>> a
- tensor([[ 2.4112, -0.7486, 1.4551],
- [-0.7486, 1.3544, 0.1294],
- [ 1.4551, 0.1294, 1.6724]])
- >>> l
- tensor([[ 1.5528, 0.0000, 0.0000],
- [-0.4821, 1.0592, 0.0000],
- [ 0.9371, 0.5487, 0.7023]])
- >>> l @ l.mT
- tensor([[ 2.4112, -0.7486, 1.4551],
- [-0.7486, 1.3544, 0.1294],
- [ 1.4551, 0.1294, 1.6724]])
- >>> a = torch.randn(3, 2, 2) # Example for batched input
- >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
- >>> l = torch.cholesky(a)
- >>> z = l @ l.mT
- >>> torch.dist(z, a)
- tensor(2.3842e-07)
- """,
- )
- add_docstr(
- torch.cholesky_solve,
- r"""
- cholesky_solve(input, input2, upper=False, *, out=None) -> Tensor
- Solves a linear system of equations with a positive semidefinite
- matrix to be inverted given its Cholesky factor matrix :math:`u`.
- If :attr:`upper` is ``False``, :math:`u` is lower triangular and `c` is
- returned such that:
- .. math::
- c = (u u^T)^{-1} b
- If :attr:`upper` is ``True``, :math:`u` is upper triangular
- and `c` is returned such that:
- .. math::
- c = (u^T u)^{-1} b
- `torch.cholesky_solve(b, u)` can take in 2D inputs `b, u` or inputs that are
- batches of 2D matrices. If the inputs are batches, then batched
- outputs `c` are returned.
- Supports real-valued and complex-valued inputs.
- For the complex-valued inputs the transpose operator above is the conjugate transpose.
- Args:
- input (Tensor): input matrix :math:`b` of size :math:`(*, m, k)`,
- where :math:`*` is zero or more batch dimensions
- input2 (Tensor): input matrix :math:`u` of size :math:`(*, m, m)`,
- where :math:`*` is zero or more batch dimensions composed of
- upper or lower triangular Cholesky factor
- upper (bool, optional): whether to consider the Cholesky factor as a
- lower or upper triangular matrix. Default: ``False``.
- Keyword args:
- out (Tensor, optional): the output tensor for `c`
- Example::
- >>> a = torch.randn(3, 3)
- >>> a = torch.mm(a, a.t()) # make symmetric positive definite
- >>> u = torch.linalg.cholesky(a)
- >>> a
- tensor([[ 0.7747, -1.9549, 1.3086],
- [-1.9549, 6.7546, -5.4114],
- [ 1.3086, -5.4114, 4.8733]])
- >>> b = torch.randn(3, 2)
- >>> b
- tensor([[-0.6355, 0.9891],
- [ 0.1974, 1.4706],
- [-0.4115, -0.6225]])
- >>> torch.cholesky_solve(b, u)
- tensor([[ -8.1625, 19.6097],
- [ -5.8398, 14.2387],
- [ -4.3771, 10.4173]])
- >>> torch.mm(a.inverse(), b)
- tensor([[ -8.1626, 19.6097],
- [ -5.8398, 14.2387],
- [ -4.3771, 10.4173]])
- """,
- )
- add_docstr(
- torch.cholesky_inverse,
- r"""
- cholesky_inverse(input, upper=False, *, out=None) -> Tensor
- Computes the inverse of a symmetric positive-definite matrix :math:`A` using its
- Cholesky factor :math:`u`: returns matrix ``inv``. The inverse is computed using
- LAPACK routines ``dpotri`` and ``spotri`` (and the corresponding MAGMA routines).
- If :attr:`upper` is ``False``, :math:`u` is lower triangular
- such that the returned tensor is
- .. math::
- inv = (uu^{T})^{-1}
- If :attr:`upper` is ``True``, :math:`u` is upper
- triangular such that the returned tensor is
- .. math::
- inv = (u^T u)^{-1}
- Supports input of float, double, cfloat and cdouble dtypes.
- Also supports batches of matrices, and if :math:`A` is a batch of matrices then the output has the same batch dimensions.
- Args:
- input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)`,
- consisting of symmetric positive-definite matrices
- where :math:`*` is zero or more batch dimensions.
- upper (bool, optional): flag that indicates whether to return an
- upper or lower triangular matrix. Default: ``False``
- Keyword args:
- out (Tensor, optional): the output tensor for `inv`
- Example::
- >>> a = torch.randn(3, 3)
- >>> a = torch.mm(a, a.t()) + 1e-05 * torch.eye(3) # make symmetric positive definite
- >>> u = torch.linalg.cholesky(a)
- >>> a
- tensor([[ 0.9935, -0.6353, 1.5806],
- [ -0.6353, 0.8769, -1.7183],
- [ 1.5806, -1.7183, 10.6618]])
- >>> torch.cholesky_inverse(u)
- tensor([[ 1.9314, 1.2251, -0.0889],
- [ 1.2251, 2.4439, 0.2122],
- [-0.0889, 0.2122, 0.1412]])
- >>> a.inverse()
- tensor([[ 1.9314, 1.2251, -0.0889],
- [ 1.2251, 2.4439, 0.2122],
- [-0.0889, 0.2122, 0.1412]])
- >>> a = torch.randn(3, 2, 2) # Example for batched input
- >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
- >>> l = torch.linalg.cholesky(a)
- >>> z = l @ l.mT
- >>> torch.dist(z, a)
- tensor(3.5894e-07)
- """,
- )
- add_docstr(
- torch.clone,
- r"""
- clone(input, *, memory_format=torch.preserve_format) -> Tensor
- Returns a copy of :attr:`input`.
- .. note::
- This function is differentiable, so gradients will flow back from the
- result of this operation to :attr:`input`. To create a tensor without an
- autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
- Args:
- {input}
- Keyword args:
- {memory_format}
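- Example (a minimal sketch of the gradient flow described in the note above)::
- >>> x = torch.tensor([1., 2., 3.], requires_grad=True)
- >>> y = x.clone()
- >>> y.sum().backward()
- >>> x.grad
- tensor([1., 1., 1.])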
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.clamp,
- r"""
- clamp(input, min=None, max=None, *, out=None) -> Tensor
- Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
- Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
- .. math::
- y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
- If :attr:`min` is ``None``, there is no lower bound.
- Or, if :attr:`max` is ``None`` there is no upper bound.
- """
- + r"""
- .. note::
- If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
- sets all elements in :attr:`input` to the value of :attr:`max`.
- Args:
- {input}
- min (Number or Tensor, optional): lower-bound of the range to be clamped to
- max (Number or Tensor, optional): upper-bound of the range to be clamped to
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-1.7120, 0.1734, -0.0478, -0.0922])
- >>> torch.clamp(a, min=-0.5, max=0.5)
- tensor([-0.5000, 0.1734, -0.0478, -0.0922])
- >>> min = torch.linspace(-1, 1, steps=4)
- >>> torch.clamp(a, min=min)
- tensor([-1.0000, 0.1734, 0.3333, 1.0000])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.clip,
- r"""
- clip(input, min=None, max=None, *, out=None) -> Tensor
- Alias for :func:`torch.clamp`.
- """,
- )
- add_docstr(
- torch.column_stack,
- r"""
- column_stack(tensors, *, out=None) -> Tensor
- Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
- Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
- in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.column_stack((a, b))
- tensor([[1, 4],
- [2, 5],
- [3, 6]])
- >>> a = torch.arange(5)
- >>> b = torch.arange(10).reshape(5, 2)
- >>> torch.column_stack((a, b, b))
- tensor([[0, 0, 1, 0, 1],
- [1, 2, 3, 2, 3],
- [2, 4, 5, 4, 5],
- [3, 6, 7, 6, 7],
- [4, 8, 9, 8, 9]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.complex,
- r"""
- complex(real, imag, *, out=None) -> Tensor
- Constructs a complex tensor with its real part equal to :attr:`real` and its
- imaginary part equal to :attr:`imag`.
- Args:
- real (Tensor): The real part of the complex tensor. Must be float or double.
- imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
- as :attr:`real`.
- Keyword args:
- out (Tensor): If the inputs are ``torch.float32``, must be
- ``torch.complex64``. If the inputs are ``torch.float64``, must be
- ``torch.complex128``.
- Example::
- >>> real = torch.tensor([1, 2], dtype=torch.float32)
- >>> imag = torch.tensor([3, 4], dtype=torch.float32)
- >>> z = torch.complex(real, imag)
- >>> z
- tensor([(1.+3.j), (2.+4.j)])
- >>> z.dtype
- torch.complex64
- """,
- )
- add_docstr(
- torch.polar,
- r"""
- polar(abs, angle, *, out=None) -> Tensor
- Constructs a complex tensor whose elements are Cartesian coordinates
- corresponding to the polar coordinates with absolute value :attr:`abs` and angle
- :attr:`angle`.
- .. math::
- \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
- .. note::
- `torch.polar` is similar to
- `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
- and does not compute the polar decomposition
- of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
- The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
- infinite.
- """
- + r"""
- Args:
- abs (Tensor): The absolute value of the complex tensor. Must be float or double.
- angle (Tensor): The angle of the complex tensor. Must be same dtype as
- :attr:`abs`.
- Keyword args:
- out (Tensor): If the inputs are ``torch.float32``, must be
- ``torch.complex64``. If the inputs are ``torch.float64``, must be
- ``torch.complex128``.
- Example::
- >>> import numpy as np
- >>> abs = torch.tensor([1, 2], dtype=torch.float64)
- >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
- >>> z = torch.polar(abs, angle)
- >>> z
- tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
- """,
- )
- add_docstr(
- torch.conj_physical,
- r"""
- conj_physical(input, *, out=None) -> Tensor
- Computes the element-wise conjugate of the given :attr:`input` tensor.
- If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
- .. note::
- This performs the conjugate operation regardless of whether the conjugate bit is set.
- .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
- non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
- when :attr:`input` is of non-complex dtype to be compatible with this change.
- .. math::
- \text{out}_{i} = conj(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
- tensor([-1 - 1j, -2 - 2j, 3 + 3j])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.conj,
- r"""
- conj(input) -> Tensor
- Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
- this function just returns :attr:`input`.
- .. note::
- :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
- at any time using :func:`torch.resolve_conj`.
- .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
- non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
- when :attr:`input` is of non-complex dtype to be compatible with this change.
- Args:
- {input}
- Example::
- >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
- >>> x.is_conj()
- False
- >>> y = torch.conj(x)
- >>> y.is_conj()
- True
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.resolve_conj,
- r"""
- resolve_conj(input) -> Tensor
- Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
- else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
- Args:
- {input}
- Example::
- >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
- >>> y = x.conj()
- >>> y.is_conj()
- True
- >>> z = y.resolve_conj()
- >>> z
- tensor([-1 - 1j, -2 - 2j, 3 + 3j])
- >>> z.is_conj()
- False
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.resolve_neg,
- r"""
- resolve_neg(input) -> Tensor
- Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
- else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
- Args:
- {input}
- Example::
- >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
- >>> y = x.conj()
- >>> z = y.imag
- >>> z.is_neg()
- True
- >>> out = z.resolve_neg()
- >>> out
- tensor([-1., -2., 3.])
- >>> out.is_neg()
- False
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.copysign,
- r"""
- copysign(input, other, *, out=None) -> Tensor
- Creates a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
- .. math::
- \text{out}_{i} = \begin{cases}
- -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
- |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
- \end{cases}
- """
- + r"""
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- and integer and float inputs.
- Args:
- input (Tensor): magnitudes.
- other (Tensor or Number): contains value(s) whose signbit(s) are
- applied to the magnitudes in :attr:`input`.
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(5)
- >>> a
- tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
- >>> torch.copysign(a, 1)
- tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
- [-0.0059, -0.2600, -0.4475, -1.3948],
- [ 0.3667, -0.9567, -2.5757, -0.1751],
- [ 0.2046, -0.0742, 0.2998, -0.1054]])
- >>> b = torch.randn(4)
- >>> b
- tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
- >>> torch.copysign(a, b)
- tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
- [ 0.0059, 0.2600, 0.4475, -1.3948],
- [ 0.3667, 0.9567, 2.5757, -0.1751],
- [ 0.2046, 0.0742, 0.2998, -0.1054]])
- >>> a = torch.tensor([1.])
- >>> b = torch.tensor([-0.])
- >>> torch.copysign(a, b)
- tensor([-1.])
- .. note::
- copysign handles signed zeros. If the other argument has a negative zero (-0),
- the corresponding output value will be negative.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.cos,
- r"""
- cos(input, *, out=None) -> Tensor
- Returns a new tensor with the cosine of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \cos(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
- >>> torch.cos(a)
- tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.cosh,
- r"""
- cosh(input, *, out=None) -> Tensor
- Returns a new tensor with the hyperbolic cosine of the elements of
- :attr:`input`.
- .. math::
- \text{out}_{i} = \cosh(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
- >>> torch.cosh(a)
- tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
- .. note::
- When :attr:`input` is on the CPU, the implementation of torch.cosh may use
- the Sleef library, which rounds very large results to infinity or negative
- infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.cross,
- r"""
- cross(input, other, dim=None, *, out=None) -> Tensor
- Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
- and :attr:`other`.
- Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
- of vectors, for which it computes the product along the dimension :attr:`dim`.
- In this case, the output has the same batch dimensions as the inputs.
- If :attr:`dim` is not given, it defaults to the first dimension found with
- size 3. Note that this might be unexpected.
- .. seealso::
- :func:`torch.linalg.cross`, which uses ``dim=-1`` by default.
- .. warning:: This function may change in a future PyTorch release to match
- the default behaviour in :func:`torch.linalg.cross`. We recommend using
- :func:`torch.linalg.cross`.
- Args:
- {input}
- other (Tensor): the second input tensor
- dim (int, optional): the dimension to take the cross-product in.
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4, 3)
- >>> a
- tensor([[-0.3956, 1.1455, 1.6895],
- [-0.5849, 1.3672, 0.3599],
- [-1.1626, 0.7180, -0.0521],
- [-0.1339, 0.9902, -2.0225]])
- >>> b = torch.randn(4, 3)
- >>> b
- tensor([[-0.0257, -1.4725, -1.2251],
- [-1.1479, -0.7005, -1.9757],
- [-1.3904, 0.3726, -1.1836],
- [-0.9688, -0.7153, 0.2159]])
- >>> torch.cross(a, b, dim=1)
- tensor([[ 1.0844, -0.5281, 0.6120],
- [-2.4490, -1.5687, 1.9792],
- [-0.8304, -1.3037, 0.5650],
- [-1.2329, 1.9883, 1.0551]])
- >>> torch.cross(a, b)
- tensor([[ 1.0844, -0.5281, 0.6120],
- [-2.4490, -1.5687, 1.9792],
- [-0.8304, -1.3037, 0.5650],
- [-1.2329, 1.9883, 1.0551]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logcumsumexp,
- r"""
- logcumsumexp(input, dim, *, out=None) -> Tensor
- Returns the logarithm of the cumulative summation of the exponentiation of
- elements of :attr:`input` in the dimension :attr:`dim`.
- For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
- .. math::
- \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})
- Args:
- {input}
- dim (int): the dimension to do the operation over
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(10)
- >>> torch.logcumsumexp(a, dim=0)
- tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
- 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
- """.format(
- **reduceops_common_args
- ),
- )
- add_docstr(
- torch.cummax,
- r"""
- cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
- Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
- elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
- location of each maximum value found in the dimension :attr:`dim`.
- .. math::
- y_i = \max(x_1, x_2, x_3, \dots, x_i)
- Args:
- {input}
- dim (int): the dimension to do the operation over
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
- Example::
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
- 1.9946, -0.8209])
- >>> torch.cummax(a, dim=0)
- torch.return_types.cummax(
- values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
- 1.9946, 1.9946]),
- indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
- """.format(
- **reduceops_common_args
- ),
- )
- add_docstr(
- torch.cummin,
- r"""
- cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
- Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
- elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
- location of each minimum value found in the dimension :attr:`dim`.
- .. math::
- y_i = \min(x_1, x_2, x_3, \dots, x_i)
- Args:
- {input}
- dim (int): the dimension to do the operation over
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
- Example::
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
- 0.9165, 1.6684])
- >>> torch.cummin(a, dim=0)
- torch.return_types.cummin(
- values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
- -1.3298, -1.3298]),
- indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
- """.format(
- **reduceops_common_args
- ),
- )
- add_docstr(
- torch.cumprod,
- r"""
- cumprod(input, dim, *, dtype=None, out=None) -> Tensor
- Returns the cumulative product of elements of :attr:`input` in the dimension
- :attr:`dim`.
- For example, if :attr:`input` is a vector of size N, the result will also be
- a vector of size N, with elements:
- .. math::
- y_i = x_1 \times x_2\times x_3\times \dots \times x_i
- Args:
- {input}
- dim (int): the dimension to do the operation over
- Keyword args:
- {dtype}
- {out}
- Example::
- >>> a = torch.randn(10)
- >>> a
- tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
- -0.2129, -0.4206, 0.1968])
- >>> torch.cumprod(a, dim=0)
- tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
- 0.0014, -0.0006, -0.0001])
- >>> a[5] = 0.0
- >>> torch.cumprod(a, dim=0)
- tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
- 0.0000, -0.0000, -0.0000])
- """.format(
- **reduceops_common_args
- ),
- )
- add_docstr(
- torch.cumsum,
- r"""
- cumsum(input, dim, *, dtype=None, out=None) -> Tensor
- Returns the cumulative sum of elements of :attr:`input` in the dimension
- :attr:`dim`.
- For example, if :attr:`input` is a vector of size N, the result will also be
- a vector of size N, with elements:
- .. math::
- y_i = x_1 + x_2 + x_3 + \dots + x_i
- Args:
- {input}
- dim (int): the dimension to do the operation over
- Keyword args:
- {dtype}
- {out}
- Example::
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
- 0.1850, -1.1571, -0.4243])
- >>> torch.cumsum(a, dim=0)
- tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
- -1.8209, -2.9780, -3.4022])
- """.format(
- **reduceops_common_args
- ),
- )
- add_docstr(
- torch.count_nonzero,
- r"""
- count_nonzero(input, dim=None) -> Tensor
- Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
- If no dim is specified, then all non-zeros in the tensor are counted.
- Args:
- {input}
- dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
- Example::
- >>> x = torch.zeros(3,3)
- >>> x[torch.randn(3,3) > 0.5] = 1
- >>> x
- tensor([[0., 1., 1.],
- [0., 0., 0.],
- [0., 0., 1.]])
- >>> torch.count_nonzero(x)
- tensor(3)
- >>> torch.count_nonzero(x, dim=0)
- tensor([0, 1, 2])
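- A tuple of dims, as allowed above, reduces over all of them at once::
- >>> torch.count_nonzero(x, dim=(0, 1))
- tensor(3)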
- """.format(
- **reduceops_common_args
- ),
- )
- add_docstr(
- torch.dequantize,
- r"""
- dequantize(tensor) -> Tensor
- Returns an fp32 Tensor by dequantizing a quantized Tensor.
- Args:
- tensor (Tensor): A quantized Tensor
- .. function:: dequantize(tensors) -> sequence of Tensors
- :noindex:
- Given a list of quantized Tensors, dequantizes them and returns a list of fp32 Tensors.
- Args:
- tensors (sequence of Tensors): A list of quantized Tensors
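- Example (a minimal sketch; the scale is chosen so the quantization is exact)::
- >>> x = torch.quantize_per_tensor(torch.tensor([1.0, 2.0]), scale=0.1, zero_point=0, dtype=torch.quint8)
- >>> torch.dequantize(x)
- tensor([1., 2.])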
- """,
- )
- add_docstr(
- torch.diag,
- r"""
- diag(input, diagonal=0, *, out=None) -> Tensor
- - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
- with the elements of :attr:`input` as the diagonal.
- - If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
- the diagonal elements of :attr:`input`.
- The argument :attr:`diagonal` controls which diagonal to consider:
- - If :attr:`diagonal` = 0, it is the main diagonal.
- - If :attr:`diagonal` > 0, it is above the main diagonal.
- - If :attr:`diagonal` < 0, it is below the main diagonal.
- Args:
- {input}
- diagonal (int, optional): the diagonal to consider
- Keyword args:
- {out}
- .. seealso::
- :func:`torch.diagonal` always returns the diagonal of its input.
- :func:`torch.diagflat` always constructs a tensor with diagonal elements
- specified by the input.
- Examples:
- Get the square matrix where the input vector is the diagonal::
- >>> a = torch.randn(3)
- >>> a
- tensor([ 0.5950, -0.0872, 2.3298])
- >>> torch.diag(a)
- tensor([[ 0.5950, 0.0000, 0.0000],
- [ 0.0000, -0.0872, 0.0000],
- [ 0.0000, 0.0000, 2.3298]])
- >>> torch.diag(a, 1)
- tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
- [ 0.0000, 0.0000, -0.0872, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 2.3298],
- [ 0.0000, 0.0000, 0.0000, 0.0000]])
- Get the k-th diagonal of a given matrix::
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-0.4264, 0.0255, -0.1064],
- [ 0.8795, -0.2429, 0.1374],
- [ 0.1029, -0.6482, -1.6300]])
- >>> torch.diag(a, 0)
- tensor([-0.4264, -0.2429, -1.6300])
- >>> torch.diag(a, 1)
- tensor([ 0.0255, 0.1374])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.diag_embed,
- r"""
- diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
- Creates a tensor whose diagonals of certain 2D planes (specified by
- :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
- To facilitate creating batched diagonal matrices, the 2D planes formed by
- the last two dimensions of the returned tensor are chosen by default.
- The argument :attr:`offset` controls which diagonal to consider:
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
- The size of the new matrix is calculated so that the specified diagonal
- has the size of the last dimension of :attr:`input`.
- Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
- and :attr:`dim2` matters. Exchanging them is equivalent to changing the
- sign of :attr:`offset`.
- Applying :meth:`torch.diagonal` to the output of this function with
- the same arguments yields a matrix identical to input. However,
- :meth:`torch.diagonal` has different default dimensions, so those
- need to be explicitly specified.
- Args:
- {input} Must be at least 1-dimensional.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: -2.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: -1.
- Example::
- >>> a = torch.randn(2, 3)
- >>> torch.diag_embed(a)
- tensor([[[ 1.5410, 0.0000, 0.0000],
- [ 0.0000, -0.2934, 0.0000],
- [ 0.0000, 0.0000, -2.1788]],
- [[ 0.5684, 0.0000, 0.0000],
- [ 0.0000, -1.0845, 0.0000],
- [ 0.0000, 0.0000, -1.3986]]])
- >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
- tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
- [ 0.0000, 0.5684, 0.0000, 0.0000]],
- [[ 0.0000, 0.0000, -0.2934, 0.0000],
- [ 0.0000, 0.0000, -1.0845, 0.0000]],
- [[ 0.0000, 0.0000, 0.0000, -2.1788],
- [ 0.0000, 0.0000, 0.0000, -1.3986]],
- [[ 0.0000, 0.0000, 0.0000, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 0.0000]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.diagflat,
- r"""
- diagflat(input, offset=0) -> Tensor
- - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
- with the elements of :attr:`input` as the diagonal.
- - If :attr:`input` is a tensor with more than one dimension, then returns a
- 2-D tensor with diagonal elements equal to a flattened :attr:`input`.
- The argument :attr:`offset` controls which diagonal to consider:
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
- Args:
- {input}
- offset (int, optional): the diagonal to consider. Default: 0 (main
- diagonal).
- Examples::
- >>> a = torch.randn(3)
- >>> a
- tensor([-0.2956, -0.9068, 0.1695])
- >>> torch.diagflat(a)
- tensor([[-0.2956, 0.0000, 0.0000],
- [ 0.0000, -0.9068, 0.0000],
- [ 0.0000, 0.0000, 0.1695]])
- >>> torch.diagflat(a, 1)
- tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
- [ 0.0000, 0.0000, -0.9068, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 0.1695],
- [ 0.0000, 0.0000, 0.0000, 0.0000]])
- >>> a = torch.randn(2, 2)
- >>> a
- tensor([[ 0.2094, -0.3018],
- [-0.1516, 1.9342]])
- >>> torch.diagflat(a)
- tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
- [ 0.0000, -0.3018, 0.0000, 0.0000],
- [ 0.0000, 0.0000, -0.1516, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 1.9342]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.diagonal,
- r"""
- diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
- Returns a partial view of :attr:`input` with its diagonal elements
- with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
- at the end of the shape.
- The argument :attr:`offset` controls which diagonal to consider:
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
- Applying :meth:`torch.diag_embed` to the output of this function with
- the same arguments yields a diagonal matrix with the diagonal entries
- of the input. However, :meth:`torch.diag_embed` has different default
- dimensions, so those need to be explicitly specified.
- Args:
- {input} Must be at least 2-dimensional.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: 0.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: 1.
- .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
- Examples::
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-1.0854, 1.1431, -0.1752],
- [ 0.8536, -0.0905, 0.0360],
- [ 0.6927, -0.3735, -0.4945]])
- >>> torch.diagonal(a, 0)
- tensor([-1.0854, -0.0905, -0.4945])
- >>> torch.diagonal(a, 1)
- tensor([ 1.1431, 0.0360])
- >>> x = torch.randn(2, 5, 4, 2)
- >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
- tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
- [-1.1065, 1.0401, -0.2235, -0.7938]],
- [[-1.7325, -0.3081, 0.6166, 0.2335],
- [ 1.0500, 0.7336, -0.3836, -1.1015]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.diagonal_scatter,
- r"""
- diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
- Embeds the values of the :attr:`src` tensor into :attr:`input` along
- the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
- and :attr:`dim2`.
- This function returns a tensor with fresh storage; it does not
- return a view.
- The argument :attr:`offset` controls which diagonal to consider:
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
- Args:
- {input} Must be at least 2-dimensional.
- src (Tensor): the tensor to embed into :attr:`input`.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: 0.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: 1.
- .. note::
- :attr:`src` must be of the proper size in order to be embedded
- into :attr:`input`. Specifically, it should have the same shape as
- ``torch.diagonal(input, offset, dim1, dim2)``
- Examples::
- >>> a = torch.zeros(3, 3)
- >>> a
- tensor([[0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]])
- >>> torch.diagonal_scatter(a, torch.ones(3), 0)
- tensor([[1., 0., 0.],
- [0., 1., 0.],
- [0., 0., 1.]])
- >>> torch.diagonal_scatter(a, torch.ones(2), 1)
- tensor([[0., 1., 0.],
- [0., 0., 1.],
- [0., 0., 0.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.as_strided_scatter,
- r"""
- as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor
- Embeds the values of the :attr:`src` tensor into :attr:`input` along
- the elements corresponding to the result of calling
- input.as_strided(size, stride, storage_offset).
- This function returns a tensor with fresh storage; it does not
- return a view.
- Args:
- {input}
- src (Tensor): the tensor to embed into :attr:`input`
- size (tuple of ints): the shape of the output tensor
- stride (tuple of ints): the stride of the output tensor
- storage_offset (int, optional): the offset in the underlying storage of the output tensor
- .. note::
- :attr:`src` must be of the proper size in order to be embedded
- into :attr:`input`. Specifically, it should have the same shape as
- `torch.as_strided(input, size, stride, storage_offset)`
- Example::
- >>> a = torch.arange(4).reshape(2, 2) + 1
- >>> a
- tensor([[1, 2],
- [3, 4]])
- >>> b = torch.zeros(3, 3)
- >>> b
- tensor([[0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]])
- >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
- tensor([[1., 3., 2.],
- [4., 0., 0.],
- [0., 0., 0.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.diff,
- r"""
- diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
- Computes the n-th forward difference along the given dimension.
- The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
- differences are calculated by using :func:`torch.diff` recursively.
- Args:
- input (Tensor): the tensor to compute the differences on
- n (int, optional): the number of times to recursively compute the difference
- dim (int, optional): the dimension to compute the difference along.
- Default is the last dimension.
- prepend, append (Tensor, optional): values to prepend or append to
- :attr:`input` along :attr:`dim` before computing the difference.
- Their number of dimensions must match that of :attr:`input`, and their
- shapes must match :attr:`input`'s shape except on :attr:`dim`.
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([1, 3, 2])
- >>> torch.diff(a)
- tensor([ 2, -1])
- >>> b = torch.tensor([4, 5])
- >>> torch.diff(a, append=b)
- tensor([ 2, -1, 2, 1])
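- >>> torch.diff(a, n=2) # the recursive rule above: the difference applied twice
- tensor([-3])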
- >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
- >>> torch.diff(c, dim=0)
- tensor([[2, 2, 2]])
- >>> torch.diff(c, dim=1)
- tensor([[1, 1],
- [1, 1]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.digamma,
- r"""
- digamma(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.digamma`.
- """,
- )
- add_docstr(
- torch.dist,
- r"""
- dist(input, other, p=2) -> Tensor
- Returns the p-norm of (:attr:`input` - :attr:`other`).
- The shapes of :attr:`input` and :attr:`other` must be
- :ref:`broadcastable <broadcasting-semantics>`.
- Args:
- {input}
- other (Tensor): the right-hand-side input tensor
- p (float, optional): the norm to be computed
- Example::
- >>> x = torch.randn(4)
- >>> x
- tensor([-1.5393, -0.8675, 0.5916, 1.6321])
- >>> y = torch.randn(4)
- >>> y
- tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
- >>> torch.dist(x, y, 3.5)
- tensor(1.6727)
- >>> torch.dist(x, y, 3)
- tensor(1.6973)
- >>> torch.dist(x, y, 0)
- tensor(4.)
- >>> torch.dist(x, y, 1)
- tensor(2.6537)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.div,
- r"""
- div(input, other, *, rounding_mode=None, out=None) -> Tensor
- Divides each element of :attr:`input` by the corresponding element of
- :attr:`other`.
- .. math::
- \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}
- .. note::
- By default, this performs a "true" division like Python 3.
- See the :attr:`rounding_mode` argument for floor division.
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
- Always promotes integer types to the default scalar type.
- Args:
- input (Tensor): the dividend
- other (Tensor or Number): the divisor
- Keyword args:
- rounding_mode (str, optional): Type of rounding applied to the result:
- * None - default behavior. Performs no rounding and, if both :attr:`input` and
- :attr:`other` are integer types, promotes the inputs to the default scalar type.
- Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
- * ``"trunc"`` - rounds the results of the division towards zero.
- Equivalent to C-style integer division.
- * ``"floor"`` - rounds the results of the division down.
- Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
- {out}
- Examples::
- >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
- >>> torch.div(x, 0.5)
- tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
- >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
- ... [ 0.1815, -1.0111, 0.9805, -1.5923],
- ... [ 0.1062, 1.4581, 0.7759, -1.2344],
- ... [-0.1830, -0.0313, 1.1908, -1.4757]])
- >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
- >>> torch.div(a, b)
- tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
- [ 0.2260, -3.4509, -1.2086, 6.8990],
- [ 0.1322, 4.9764, -0.9564, 5.3484],
- [-0.2278, -0.1068, -1.4678, 6.3938]])
- >>> torch.div(a, b, rounding_mode='trunc')
- tensor([[-0., -6., 0., 1.],
- [ 0., -3., -1., 6.],
- [ 0., 4., -0., 5.],
- [-0., -0., -1., 6.]])
- >>> torch.div(a, b, rounding_mode='floor')
- tensor([[-1., -7., 0., 1.],
- [ 0., -4., -2., 6.],
- [ 0., 4., -1., 5.],
- [-1., -1., -2., 6.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.divide,
- r"""
- divide(input, other, *, rounding_mode=None, out=None) -> Tensor
- Alias for :func:`torch.div`.
- """,
- )
- add_docstr(
- torch.dot,
- r"""
- dot(input, other, *, out=None) -> Tensor
- Computes the dot product of two 1D tensors.
- .. note::
- Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
- of two 1D tensors with the same number of elements.
- Args:
- input (Tensor): first tensor in the dot product, must be 1D.
- other (Tensor): second tensor in the dot product, must be 1D.
- Keyword args:
- {out}
- Example::
- >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
- tensor(7)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.vdot,
- r"""
- vdot(input, other, *, out=None) -> Tensor
- Computes the dot product of two 1D tensors.
- In symbols, this function computes
- .. math::
- \sum_{i=1}^n \overline{x_i}y_i.
- where :math:`\overline{x_i}` denotes the conjugate for complex
- vectors, and it is the identity for real vectors.
- .. note::
- Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
- of two 1D tensors with the same number of elements.
- .. seealso::
- :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.
- Args:
- input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
- other (Tensor): second tensor in the dot product, must be 1D.
- Keyword args:
- """
- + rf"""
- .. note:: {common_args["out"]}
- """
- + r"""
- Example::
- >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
- tensor(7)
- >>> a = torch.tensor((1 + 2j, 3 - 1j))
- >>> b = torch.tensor((2 + 1j, 4 - 0j))
- >>> torch.vdot(a, b)
- tensor([16.+1.j])
- >>> torch.vdot(b, a)
- tensor([16.-1.j])
- """,
- )
- add_docstr(
- torch.eq,
- r"""
- eq(input, other, *, out=None) -> Tensor
- Computes element-wise equality
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
- Keyword args:
- {out}
- Returns:
- A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
- Example::
- >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[ True, False],
- [False, True]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.equal,
- r"""
- equal(input, other) -> bool
- ``True`` if two tensors have the same size and elements, ``False`` otherwise.
- Example::
- >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
- True
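- >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2, 3])) # sizes differ
- False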
- """,
- )
- add_docstr(
- torch.erf,
- r"""
- erf(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.erf`.
- """,
- )
- add_docstr(
- torch.erfc,
- r"""
- erfc(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.erfc`.
- """,
- )
- add_docstr(
- torch.erfinv,
- r"""
- erfinv(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.erfinv`.
- """,
- )
- add_docstr(
- torch.exp,
- r"""
- exp(input, *, out=None) -> Tensor
- Returns a new tensor with the exponential of the elements
- of the input tensor :attr:`input`.
- .. math::
- y_{i} = e^{x_{i}}
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> torch.exp(torch.tensor([0, math.log(2.)]))
- tensor([ 1., 2.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.exp2,
- r"""
- exp2(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.exp2`.
- """,
- )
- add_docstr(
- torch.expm1,
- r"""
- expm1(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.expm1`.
- """,
- )
- add_docstr(
- torch.eye,
- r"""
- eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
- Args:
- n (int): the number of rows
- m (int, optional): the number of columns with default being :attr:`n`
- Keyword arguments:
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- Returns:
- Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
- Example::
- >>> torch.eye(3)
- tensor([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.floor,
- r"""
- floor(input, *, out=None) -> Tensor
- Returns a new tensor with the floor of the elements of :attr:`input`,
- the largest integer less than or equal to each element.
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
- .. math::
- \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.8166, 1.5308, -0.2530, -0.2091])
- >>> torch.floor(a)
- tensor([-1., 1., -1., -1.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.floor_divide,
- r"""
- floor_divide(input, other, *, out=None) -> Tensor
- .. note::
- Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
- truncation division. To restore the previous behavior use
- :func:`torch.div` with ``rounding_mode='trunc'``.
- Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
- the result.
- .. math::
- \text{out}_i = \text{floor} \left( \frac{\text{input}_i}{\text{other}_i} \right)
- """
- + r"""
- Supports broadcasting to a common shape, type promotion, and integer and float inputs.
- Args:
- input (Tensor or Number): the dividend
- other (Tensor or Number): the divisor
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([4.0, 3.0])
- >>> b = torch.tensor([2.0, 2.0])
- >>> torch.floor_divide(a, b)
- tensor([2.0, 1.0])
- >>> torch.floor_divide(a, 1.4)
- tensor([2.0, 2.0])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.fmod,
- r"""
- fmod(input, other, *, out=None) -> Tensor
- Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
- The result has the same sign as the dividend :attr:`input` and its absolute value
- is less than that of :attr:`other`.
- This function may be defined in terms of :func:`torch.div` as
- .. code:: python
- torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
- .. note::
- When the divisor is zero, returns ``NaN`` for floating point dtypes
- on both CPU and GPU; raises ``RuntimeError`` for integer division by
- zero on CPU; integer division by zero on GPU may return any value.
- .. note::
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
- .. seealso::
- :func:`torch.remainder`, which implements Python's modulus operator
- and is defined in terms of division that rounds the result down.
- Args:
- input (Tensor): the dividend
- other (Tensor or Scalar): the divisor
- Keyword args:
- {out}
- Example::
- >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([-1., -0., -1., 1., 0., 1.])
- >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.frac,
- r"""
- frac(input, *, out=None) -> Tensor
- Computes the fractional portion of each element in :attr:`input`.
- .. math::
- \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
- Example::
- >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
- tensor([ 0.0000, 0.5000, -0.2000])
- """,
- )
- add_docstr(
- torch.frexp,
- r"""
- frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
- Decomposes :attr:`input` into mantissa and exponent tensors
- such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
- The range of mantissa is the open interval (-1, 1).
- Supports float inputs.
- Args:
- input (Tensor): the input tensor
- Keyword args:
- out (tuple, optional): the output tensors
- Example::
- >>> x = torch.arange(9.)
- >>> mantissa, exponent = torch.frexp(x)
- >>> mantissa
- tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
- >>> exponent
- tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
- >>> torch.ldexp(mantissa, exponent)
- tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
- """,
- )
- add_docstr(
- torch.from_numpy,
- r"""
- from_numpy(ndarray) -> Tensor
- Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
- The returned tensor and :attr:`ndarray` share the same memory. Modifications to
- the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
- tensor is not resizable.
- It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
- ``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
- ``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
- and ``numpy.bool_``.
- .. warning::
- Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
- Example::
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.from_numpy(a)
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([-1, 2, 3])
- """,
- )
- add_docstr(
- torch.frombuffer,
- r"""
- frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
- Creates a 1-dimensional :class:`Tensor` from an object that implements
- the Python buffer protocol.
- Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
- the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
- elements.
- Note that either of the following must be true:
- 1. :attr:`count` is a positive non-zero number, and the total number of bytes
- in the buffer is at least :attr:`offset` plus :attr:`count` times the size
- (in bytes) of :attr:`dtype`.
- 2. :attr:`count` is negative, and the length (number of bytes) of the buffer
- subtracted by the :attr:`offset` is a multiple of the size (in bytes) of
- :attr:`dtype`.
- The returned tensor and buffer share the same memory. Modifications to
- the tensor will be reflected in the buffer and vice versa. The returned
- tensor is not resizable.
- .. note::
- This function increments the reference count for the object that
- owns the shared memory. Therefore, such memory will not be deallocated
- before the returned tensor goes out of scope.
- .. warning::
- This function's behavior is undefined when passed an object implementing
- the buffer protocol whose data is not on the CPU. Doing so is likely to
- cause a segmentation fault.
- .. warning::
- This function does not try to infer the :attr:`dtype` (hence, it is not
- optional). Passing a different :attr:`dtype` than its source may result
- in unexpected behavior.
- Args:
- buffer (object): a Python object that exposes the buffer interface.
- Keyword args:
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- count (int, optional): the number of desired elements to be read.
- If negative, all the elements (until the end of the buffer) will be
- read. Default: -1.
- offset (int, optional): the number of bytes to skip at the start of
- the buffer. Default: 0.
- {requires_grad}
- Example::
- >>> import array
- >>> a = array.array('i', [1, 2, 3])
- >>> t = torch.frombuffer(a, dtype=torch.int32)
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([-1, 2, 3])
- >>> # Interprets the signed char bytes as 32-bit integers.
- >>> # Each 4 signed char elements will be interpreted as
- >>> # 1 signed 32-bit integer.
- >>> import array
- >>> a = array.array('b', [-1, 0, 0, 0])
- >>> torch.frombuffer(a, dtype=torch.int32)
- tensor([255], dtype=torch.int32)
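- >>> # offset/count sketch (assumes 4-byte C ints for 'i', as on most platforms):
- >>> # skip the first element (4 bytes) and read the remaining two.
- >>> a = array.array('i', [1, 2, 3])
- >>> torch.frombuffer(a, dtype=torch.int32, offset=4, count=2)
- tensor([2, 3], dtype=torch.int32)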
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.flatten,
- r"""
- flatten(input, start_dim=0, end_dim=-1) -> Tensor
- Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
- are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
- The order of elements in :attr:`input` is unchanged.
- Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
- or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
- be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
- flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
- .. note::
- Flattening a zero-dimensional tensor will return a one-dimensional view.
- Args:
- {input}
- start_dim (int): the first dim to flatten
- end_dim (int): the last dim to flatten
- Example::
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.flatten(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- >>> torch.flatten(t, start_dim=1)
- tensor([[1, 2, 3, 4],
- [5, 6, 7, 8]])
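- >>> # view sketch: flattening a contiguous tensor returns a view that shares
- >>> # storage with the original, so writes through one are visible in the other
- >>> base = torch.zeros(2, 2)
- >>> flat = torch.flatten(base)
- >>> flat[0] = 7.
- >>> base[0, 0]
- tensor(7.)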
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.unflatten,
- r"""
- unflatten(input, dim, sizes) -> Tensor
- Expands a dimension of the input tensor over multiple dimensions.
- .. seealso::
- :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one.
- Args:
- {input}
- dim (int): Dimension to be unflattened, specified as an index into
- ``input.shape``.
- sizes (Tuple[int]): New shape of the unflattened dimension.
- One of its elements can be `-1` in which case the corresponding output
- dimension is inferred. Otherwise, the product of ``sizes`` *must*
- equal ``input.shape[dim]``.
- Returns:
- A view of :attr:`input` with the specified dimension unflattened.
- Examples::
- >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
- torch.Size([3, 2, 2, 1])
- >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
- torch.Size([3, 2, 2, 1])
- >>> torch.unflatten(torch.randn(5, 12, 3), -1, (2, 2, 3, 1, 1)).shape
- torch.Size([5, 2, 2, 3, 1, 1, 3])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.gather,
- r"""
- gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
- Gathers values along an axis specified by `dim`.
- For a 3-D tensor the output is specified by::
- out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
- out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
- out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
- :attr:`input` and :attr:`index` must have the same number of dimensions.
- It is also required that ``index.size(d) <= input.size(d)`` for all
- dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
- Note that ``input`` and ``index`` do not broadcast against each other.
- Args:
- input (Tensor): the source tensor
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to gather
- Keyword arguments:
- sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
- out (Tensor, optional): the destination tensor
- Example::
- >>> t = torch.tensor([[1, 2], [3, 4]])
- >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
- tensor([[ 1, 1],
- [ 4, 3]])
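- >>> # dim=0 sketch: out[i][j] = input[index[i][j]][j]
- >>> torch.gather(t, 0, torch.tensor([[0, 1], [1, 0]]))
- tensor([[1, 4],
- [3, 2]])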
- """,
- )
- add_docstr(
- torch.gcd,
- r"""
- gcd(input, other, *, out=None) -> Tensor
- Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
- Both :attr:`input` and :attr:`other` must have integer types.
- .. note::
- This defines :math:`gcd(0, 0) = 0`.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.tensor([5, 10, 15])
- >>> b = torch.tensor([3, 4, 5])
- >>> torch.gcd(a, b)
- tensor([1, 2, 5])
- >>> c = torch.tensor([3])
- >>> torch.gcd(a, c)
- tensor([1, 1, 3])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.ge,
- r"""
- ge(input, other, *, out=None) -> Tensor
- Computes :math:`\text{input} \geq \text{other}` element-wise.
- """
- + r"""
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
- Keyword args:
- {out}
- Returns:
- A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
- Example::
- >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[True, True], [False, True]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.greater_equal,
- r"""
- greater_equal(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.ge`.
- """,
- )
- add_docstr(
- torch.gradient,
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point and :math:`x+h_r` be a point neighboring it, the value
- :math:`f(x+h_r)` is estimated using:
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(x_r)}{6} \\
- \end{aligned}
- where :math:`x_r` is a number in the interval :math:`[x, x+h_r]` and, using the fact that :math:`f \in C^3`,
- we derive:
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
- Args:
- input (``Tensor``): the tensor that represents the values of the function
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3])
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
- Examples::
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.], )
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
- >>> # partial derivative for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # doubling the spacing between samples halves the estimated partial gradients.
- >>>
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """,
- )
- add_docstr(
- torch.geqrf,
- r"""
- geqrf(input, *, out=None) -> (Tensor, Tensor)
- This is a low-level function for calling LAPACK's geqrf directly. This function
- returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_.
- Computes a QR decomposition of :attr:`input`.
- Both `Q` and `R` matrices are stored in the same output tensor `a`.
- The elements of `R` are stored on and above the diagonal.
- Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
- are stored below the diagonal.
- The results of this function can be used together with :func:`torch.linalg.householder_product`
- to obtain the `Q` matrix or
- with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
- for an efficient matrix-matrix multiplication.
- See `LAPACK documentation for geqrf`_ for further details.
- .. note::
- See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
- with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
- Args:
- input (Tensor): the input matrix
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
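- Example::
- >>> # usage sketch: recover Q with torch.linalg.householder_product; R is the
- >>> # upper triangle of the returned `a`. Inputs are random, so only the
- >>> # (approximate) roundtrip check is shown.
- >>> A = torch.randn(3, 3)
- >>> a, tau = torch.geqrf(A)
- >>> q = torch.linalg.householder_product(a, tau)
- >>> torch.allclose(q @ a.triu(), A)
- True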
- .. _LAPACK documentation for geqrf:
- http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
- """,
- )
- add_docstr(
- torch.inner,
- r"""
- inner(input, other, *, out=None) -> Tensor
- Computes the dot product for 1D tensors. For higher dimensions, sums the product
- of elements from :attr:`input` and :attr:`other` along their last dimension.
- .. note::
- If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
- to `torch.mul(input, other)`.
- If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
- dimension must match and the result is equivalent to `torch.tensordot(input,
- other, dims=([-1], [-1]))`
- Args:
- input (Tensor): First input tensor
- other (Tensor): Second input tensor
- Keyword args:
- out (Tensor, optional): Optional output tensor to write result into. The output
- shape is `input.shape[:-1] + other.shape[:-1]`.
- Example::
- >>> # Dot product
- >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
- tensor(7)
- >>> # Multidimensional input tensors
- >>> a = torch.randn(2, 3)
- >>> a
- tensor([[0.8173, 1.0874, 1.1784],
- [0.3279, 0.1234, 2.7894]])
- >>> b = torch.randn(2, 4, 3)
- >>> b
- tensor([[[-0.4682, -0.7159, 0.1506],
- [ 0.4034, -0.3657, 1.0387],
- [ 0.9892, -0.6684, 0.1774],
- [ 0.9482, 1.3261, 0.3917]],
- [[ 0.4537, 0.7493, 1.1724],
- [ 0.2291, 0.5749, -0.2267],
- [-0.7920, 0.3607, -0.3701],
- [ 1.3666, -0.5850, -1.7242]]])
- >>> torch.inner(a, b)
- tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
- [ 2.5671, 0.5452, -0.6912, -1.5509]],
- [[ 0.1782, 2.9843, 0.7366, 1.5672],
- [ 3.5115, -0.4864, -1.2476, -4.4337]]])
- >>> # Scalar input
- >>> torch.inner(a, torch.tensor(2))
- tensor([[1.6347, 2.1748, 2.3567],
- [0.6558, 0.2469, 5.5787]])
- """,
- )
- add_docstr(
- torch.outer,
- r"""
- outer(input, vec2, *, out=None) -> Tensor
- Outer product of :attr:`input` and :attr:`vec2`.
- If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
- size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
- Args:
- input (Tensor): 1-D input vector
- vec2 (Tensor): 1-D input vector
- Keyword args:
- out (Tensor, optional): optional output matrix
- Example::
- >>> v1 = torch.arange(1., 5.)
- >>> v2 = torch.arange(1., 4.)
- >>> torch.outer(v1, v2)
- tensor([[ 1., 2., 3.],
- [ 2., 4., 6.],
- [ 3., 6., 9.],
- [ 4., 8., 12.]])
- """,
- )
- add_docstr(
- torch.ger,
- r"""
- ger(input, vec2, *, out=None) -> Tensor
- Alias of :func:`torch.outer`.
- .. warning::
- This function is deprecated and will be removed in a future PyTorch release.
- Use :func:`torch.outer` instead.
- """,
- )
- add_docstr(
- torch.get_default_dtype,
- r"""
- get_default_dtype() -> torch.dtype
- Get the current default floating point :class:`torch.dtype`.
- Example::
- >>> torch.get_default_dtype() # initial default for floating point is torch.float32
- torch.float32
- >>> torch.set_default_dtype(torch.float64)
- >>> torch.get_default_dtype() # default is now changed to torch.float64
- torch.float64
- >>> torch.set_default_tensor_type(torch.FloatTensor) # setting tensor type also affects this
- >>> torch.get_default_dtype() # changed to torch.float32, the dtype for torch.FloatTensor
- torch.float32
- """,
- )
- add_docstr(
- torch.get_num_threads,
- r"""
- get_num_threads() -> int
- Returns the number of threads used for parallelizing CPU operations
- """,
- )
- add_docstr(
- torch.get_num_interop_threads,
- r"""
- get_num_interop_threads() -> int
- Returns the number of threads used for inter-op parallelism on CPU
- (e.g. in JIT interpreter)
- """,
- )
- add_docstr(
- torch.gt,
- r"""
- gt(input, other, *, out=None) -> Tensor
- Computes :math:`\text{input} > \text{other}` element-wise.
- """
- + r"""
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
- Keyword args:
- {out}
- Returns:
- A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
- Example::
- >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, True], [False, False]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.greater,
- r"""
- greater(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.gt`.
- """,
- )
- add_docstr(
- torch.histc,
- r"""
- histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
- Computes the histogram of a tensor.
- The elements are sorted into equal width bins between :attr:`min` and
- :attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
- maximum values of the data are used.
- Elements lower than min and higher than max and ``NaN`` elements are ignored.
- Args:
- {input}
- bins (int): number of histogram bins
- min (Scalar): lower end of the range (inclusive)
- max (Scalar): upper end of the range (inclusive)
- Keyword args:
- {out}
- Returns:
- Tensor: Histogram represented as a tensor
- Example::
- >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
- tensor([ 0., 2., 1., 0.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.histogram,
- r"""
- histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
- Computes a histogram of the values in a tensor.
- :attr:`bins` can be an integer or a 1D tensor.
- If :attr:`bins` is an int, it specifies the number of equal-width bins.
- By default, the lower and upper range of the bins is determined by the
- minimum and maximum elements of the input tensor. The :attr:`range`
- argument can be provided to specify a range for the bins.
- If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
- including the rightmost edge. It should contain at least 2 elements
- and its elements should be increasing.
- Args:
- {input}
- bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
- defines the sequence of bin edges including the rightmost edge.
- Keyword args:
- range (tuple of float): Defines the range of the bins.
- weight (Tensor): If provided, weight should have the same shape as input. Each value in
- input contributes its associated weight towards its bin's result.
- density (bool): If False, the result will contain the count (or total weight) in each bin.
- If True, the result is the value of the probability density function over the bins,
- normalized such that the integral over the range of the bins is 1.
- out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
- Returns:
- hist (Tensor): 1D Tensor containing the values of the histogram.
- bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
- Example::
- >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
- (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
- >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
- (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.histogramdd,
- r"""
- histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
- Computes a multi-dimensional histogram of the values in a tensor.
- Interprets the elements of an input tensor whose innermost dimension has size N
- as a collection of N-dimensional points. Maps each of the points into a set of
- N-dimensional bins and returns the number of points (or total weight) in each bin.
- :attr:`input` must be a tensor with at least 2 dimensions.
- If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
- If input has three or more dimensions, all but the last dimension are flattened.
- Each dimension is independently associated with its own strictly increasing sequence
- of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
- tensors. Alternatively, bin edges may be constructed automatically by passing a
- sequence of integers specifying the number of equal-width bins in each dimension.
- For each N-dimensional point in input:
- - Each of its coordinates is binned independently among the bin edges
- corresponding to its dimension
- - Binning results are combined to identify the N-dimensional bin (if any)
- into which the point falls
- - If the point falls into a bin, the bin's count (or total weight) is incremented
- - Points which do not fall into any bin do not contribute to the output
- :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
- If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
- of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
- least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
- the left and right edges of all bins. Every bin is inclusive of its left edge and
- exclusive of its right edge; only the rightmost bin is also inclusive of its right edge.
- If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
- in each dimension. By default, the leftmost and rightmost bin edges in each dimension
- are determined by the minimum and maximum elements of the input tensor in the
- corresponding dimension. The :attr:`range` argument can be provided to manually
- specify the leftmost and rightmost bin edges in each dimension.
- If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
- .. note::
- See also :func:`torch.histogram`, which specifically computes 1D histograms.
- While :func:`torch.histogramdd` infers the dimensionality of its bins and
- binned values from the shape of :attr:`input`, :func:`torch.histogram`
- accepts and flattens :attr:`input` of any shape.
- Args:
- {input}
- bins: Tensor[], int[], or int.
- If Tensor[], defines the sequences of bin edges.
- If int[], defines the number of equal-width bins in each dimension.
- If int, defines the number of equal-width bins for all dimensions.
- Keyword args:
- range (sequence of float): Defines the leftmost and rightmost bin edges
- in each dimension.
- weight (Tensor): By default, each value in the input has weight 1. If a weight
- tensor is passed, each N-dimensional coordinate in input
- contributes its associated weight towards its bin's result.
- The weight tensor should have the same shape as the :attr:`input`
- tensor excluding its innermost dimension N.
- density (bool): If False (default), the result will contain the count (or total weight)
- in each bin. If True, each count (weight) is divided by the total count
- (total weight), then divided by the volume of its associated bin.
- Returns:
- hist (Tensor): N-dimensional Tensor containing the values of the histogram.
- bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
- Example::
- >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
- ... weight=torch.tensor([1., 2., 4., 8.]))
- torch.return_types.histogramdd(
- hist=tensor([[0., 1., 0.],
- [2., 0., 0.],
- [4., 0., 8.]]),
- bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
- tensor([0.0000, 0.6667, 1.3333, 2.0000])))
- >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
- ... range=[0., 1., 0., 1.], density=True)
- torch.return_types.histogramdd(
- hist=tensor([[2., 0.],
- [0., 2.]]),
- bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
- tensor([0.0000, 0.5000, 1.0000])))
- """.format(
- **common_args
- ),
- )
- # TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
- torch.histogramdd.__module__ = "torch"
- add_docstr(
- torch.hypot,
- r"""
- hypot(input, other, *, out=None) -> Tensor
- Given the legs of a right triangle, return its hypotenuse.
- .. math::
- \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
- The shapes of ``input`` and ``other`` must be
- :ref:`broadcastable <broadcasting-semantics>`.
- """
- + r"""
- Args:
- input (Tensor): the first input tensor
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
- tensor([5.0000, 5.6569, 6.4031])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.i0,
- r"""
- i0(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.i0`.
- """,
- )
- add_docstr(
- torch.igamma,
- r"""
- igamma(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.special.gammainc`.
- """,
- )
- add_docstr(
- torch.igammac,
- r"""
- igammac(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.special.gammaincc`.
- """,
- )
- add_docstr(
- torch.index_select,
- r"""
- index_select(input, dim, index, *, out=None) -> Tensor
- Returns a new tensor which indexes the :attr:`input` tensor along dimension
- :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
- The returned tensor has the same number of dimensions as the original tensor
- (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
- of :attr:`index`; other dimensions have the same size as in the original tensor.
- .. note:: The returned tensor does **not** use the same storage as the original
- tensor. If :attr:`out` has a different shape than expected, we
- silently change it to the correct shape, reallocating the underlying
- storage if necessary.
- Args:
- {input}
- dim (int): the dimension in which we index
- index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
- Keyword args:
- {out}
- Example::
- >>> x = torch.randn(3, 4)
- >>> x
- tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
- [-0.4664, 0.2647, -0.1228, -1.1068],
- [-1.1734, -0.6571, 0.7230, -0.6004]])
- >>> indices = torch.tensor([0, 2])
- >>> torch.index_select(x, 0, indices)
- tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
- [-1.1734, -0.6571, 0.7230, -0.6004]])
- >>> torch.index_select(x, 1, indices)
- tensor([[ 0.1427, -0.5414],
- [-0.4664, -0.1228],
- [-1.1734, 0.7230]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.inverse,
- r"""
- inverse(input, *, out=None) -> Tensor
- Alias for :func:`torch.linalg.inv`
- """,
- )
- add_docstr(
- torch.isin,
- r"""
- isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
- Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
- a boolean tensor of the same shape as :attr:`elements` that is True for elements
- in :attr:`test_elements` and False otherwise.
- .. note::
- One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
- Args:
- elements (Tensor or Scalar): Input elements
- test_elements (Tensor or Scalar): Values against which to test for each input element
- assume_unique (bool, optional): If True, assumes both :attr:`elements` and
- :attr:`test_elements` contain unique elements, which can speed up the
- calculation. Default: False
- invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
- values for elements *not* in :attr:`test_elements`. Default: False
- Returns:
- A boolean tensor of the same shape as :attr:`elements` that is True for elements in
- :attr:`test_elements` and False otherwise
- Example::
- >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
- tensor([[False, True],
- [ True, False]])
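- >>> # with invert=True the mask is flipped: True marks elements *not* in test_elements
- >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]), invert=True)
- tensor([[ True, False],
- [False, True]])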
- """,
- )
- add_docstr(
- torch.isinf,
- r"""
- isinf(input) -> Tensor
- Tests if each element of :attr:`input` is infinite
- (positive or negative infinity) or not.
- .. note::
- Complex values are infinite when their real or imaginary part is
- infinite.
- Args:
- {input}
- Returns:
- A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
- Example::
- >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
- tensor([False, True, False, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.isposinf,
- r"""
- isposinf(input, *, out=None) -> Tensor
- Tests if each element of :attr:`input` is positive infinity or not.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
- >>> torch.isposinf(a)
- tensor([False, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.isneginf,
- r"""
- isneginf(input, *, out=None) -> Tensor
- Tests if each element of :attr:`input` is negative infinity or not.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
- >>> torch.isneginf(a)
- tensor([ True, False, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.isclose,
- r"""
- isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
- Returns a new tensor with boolean elements representing if each element of
- :attr:`input` is "close" to the corresponding element of :attr:`other`.
- Closeness is defined as:
- .. math::
- \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
- """
- + r"""
- where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
- and/or :attr:`other` are nonfinite they are close if and only if
- they are equal, with NaNs being considered equal to each other when
- :attr:`equal_nan` is True.
- Args:
- input (Tensor): first tensor to compare
- other (Tensor): second tensor to compare
- rtol (float, optional): relative tolerance. Default: 1e-05
- atol (float, optional): absolute tolerance. Default: 1e-08
- equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
- Examples::
- >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
- tensor([ True, False, False])
- >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
- tensor([True, True])
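- >>> # NaN compares unequal to itself unless equal_nan=True
- >>> torch.isclose(torch.tensor([float('nan')]), torch.tensor([float('nan')]), equal_nan=True)
- tensor([True])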
- """,
- )
- add_docstr(
- torch.isfinite,
- r"""
- isfinite(input) -> Tensor
- Returns a new tensor with boolean elements representing if each element is `finite` or not.
- Real values are finite when they are not NaN, negative infinity, or infinity.
- Complex values are finite when both their real and imaginary parts are finite.
- Args:
- {input}
- Returns:
- A boolean tensor that is True where :attr:`input` is finite and False elsewhere
- Example::
- >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
- tensor([True, False, True, False, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.isnan,
- r"""
- isnan(input) -> Tensor
- Returns a new tensor with boolean elements representing if each element of :attr:`input`
- is NaN or not. Complex values are considered NaN when either their real
- or imaginary part is NaN.
- Arguments:
- {input}
- Returns:
- A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
- Example::
- >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
- tensor([False, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.isreal,
- r"""
- isreal(input) -> Tensor
- Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
- All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
- Arguments:
- {input}
- Returns:
- A boolean tensor that is True where :attr:`input` is real and False elsewhere
- Example::
- >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
- tensor([True, False, True])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_floating_point,
- r"""
- is_floating_point(input) -> (bool)
- Returns True if the data type of :attr:`input` is a floating point data type, i.e.,
- one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
- Args:
- {input}
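- Example::
- >>> # illustrative checks; an integer tensor is not floating point
- >>> torch.is_floating_point(torch.tensor([1.0]))
- True
- >>> torch.is_floating_point(torch.tensor([1]))
- False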
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_complex,
- r"""
- is_complex(input) -> (bool)
- Returns True if the data type of :attr:`input` is a complex data type, i.e.,
- one of ``torch.complex64`` and ``torch.complex128``.
- Args:
- {input}
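- Example::
- >>> # a Python complex scalar yields the default complex dtype (complex64)
- >>> torch.is_complex(torch.tensor([1 + 2j]))
- True
- >>> torch.is_complex(torch.tensor([1.0]))
- False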
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_grad_enabled,
- r"""
- is_grad_enabled() -> (bool)
- Returns True if grad mode is currently enabled.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_inference_mode_enabled,
- r"""
- is_inference_mode_enabled() -> (bool)
- Returns True if inference mode is currently enabled.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_inference,
- r"""
- is_inference(input) -> (bool)
- Returns True if :attr:`input` is an inference tensor.
- A non-view tensor is an inference tensor if and only if it was
- allocated during inference mode. A view tensor is an inference
- tensor if and only if the tensor it is a view of is an inference tensor.
- For details on inference mode please see
- `Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
- Args:
- {input}
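- Example::
- >>> # sketch: tensors allocated under inference mode are inference tensors
- >>> with torch.inference_mode():
- ... x = torch.ones(1)
- >>> torch.is_inference(x)
- True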
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_conj,
- r"""
- is_conj(input) -> (bool)
- Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
- Args:
- {input}
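- Example::
- >>> # conj() on a complex tensor returns a view with the conjugate bit set
- >>> x = torch.tensor([1 + 2j])
- >>> torch.is_conj(x)
- False
- >>> torch.is_conj(x.conj())
- True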
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.is_nonzero,
- r"""
- is_nonzero(input) -> (bool)
- Returns True if the :attr:`input` is a single element tensor which is not equal to zero
- after type conversions, i.e. not equal to ``torch.tensor([0.])``, ``torch.tensor([0])``, or
- ``torch.tensor([False])``.
- Throws a ``RuntimeError`` if ``torch.numel(input) != 1`` (even in case
- of sparse tensors).
- Args:
- {input}
- Examples::
- >>> torch.is_nonzero(torch.tensor([0.]))
- False
- >>> torch.is_nonzero(torch.tensor([1.5]))
- True
- >>> torch.is_nonzero(torch.tensor([False]))
- False
- >>> torch.is_nonzero(torch.tensor([3]))
- True
- >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
- Traceback (most recent call last):
- ...
- RuntimeError: bool value of Tensor with more than one value is ambiguous
- >>> torch.is_nonzero(torch.tensor([]))
- Traceback (most recent call last):
- ...
- RuntimeError: bool value of Tensor with no values is ambiguous
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.kron,
- r"""
- kron(input, other, *, out=None) -> Tensor
- Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
- If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
- :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
- :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
- .. math::
- (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
- \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
- where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
- If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
- Supports real-valued and complex-valued inputs.
- .. note::
- This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
- as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
- :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
- .. math::
- \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
- a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
- \vdots & \ddots & \vdots \\
- a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
- where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
- Arguments:
- input (Tensor)
- other (Tensor)
- Keyword args:
- out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
- Examples::
- >>> mat1 = torch.eye(2)
- >>> mat2 = torch.ones(2, 2)
- >>> torch.kron(mat1, mat2)
- tensor([[1., 1., 0., 0.],
- [1., 1., 0., 0.],
- [0., 0., 1., 1.],
- [0., 0., 1., 1.]])
- >>> mat1 = torch.eye(2)
- >>> mat2 = torch.arange(1, 5).reshape(2, 2)
- >>> torch.kron(mat1, mat2)
- tensor([[1., 2., 0., 0.],
- [3., 4., 0., 0.],
- [0., 0., 1., 2.],
- [0., 0., 3., 4.]])
- """,
- )
- add_docstr(
- torch.kthvalue,
- r"""
- kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
- smallest element of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each element found.
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
- If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
- are the same size as :attr:`input`, except in the dimension :attr:`dim` where
- they are of size 1. Otherwise, :attr:`dim` is squeezed
- (see :func:`torch.squeeze`), resulting in both the :attr:`values` and
- :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
- .. note::
- When :attr:`input` is a CUDA tensor and there are multiple valid
- :attr:`k` th values, this function may nondeterministically return
- :attr:`indices` for any of them.
- Args:
- {input}
- k (int): k for the k-th smallest element
- dim (int, optional): the dimension to find the kth value along
- {keepdim}
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, LongTensor)
- can be optionally given to be used as output buffers
- Example::
- >>> x = torch.arange(1., 6.)
- >>> x
- tensor([ 1., 2., 3., 4., 5.])
- >>> torch.kthvalue(x, 4)
- torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
- >>> x = torch.arange(1., 7.).resize_(2, 3)
- >>> x
- tensor([[ 1., 2., 3.],
- [ 4., 5., 6.]])
- >>> torch.kthvalue(x, 2, 0, True)
- torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.lcm,
- r"""
- lcm(input, other, *, out=None) -> Tensor
- Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
- Both :attr:`input` and :attr:`other` must have integer types.
- .. note::
- This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.tensor([5, 10, 15])
- >>> b = torch.tensor([3, 4, 5])
- >>> torch.lcm(a, b)
- tensor([15, 20, 15])
- >>> c = torch.tensor([3])
- >>> torch.lcm(a, c)
- tensor([15, 30, 15])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.ldexp,
- r"""
- ldexp(input, other, *, out=None) -> Tensor
- Multiplies :attr:`input` by 2 ** :attr:`other`.
- .. math::
- \text{out}_i = \text{input}_i * 2^{\text{other}_i}
- """
- + r"""
- Typically this function is used to construct floating point numbers by multiplying
- mantissas in :attr:`input` with integral powers of two created from the exponents
- in :attr:`other`.
- Args:
- {input}
- other (Tensor): a tensor of exponents, typically integers.
- Keyword args:
- {out}
- Example::
- >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
- tensor([2.])
- >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
- tensor([ 2., 4., 8., 16.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.le,
- r"""
- le(input, other, *, out=None) -> Tensor
- Computes :math:`\text{input} \leq \text{other}` element-wise.
- """
- + r"""
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or Scalar): the tensor or value to compare
- Keyword args:
- {out}
- Returns:
- A boolean tensor that is True where :attr:`input` is less than or equal to
- :attr:`other` and False elsewhere
- Example::
- >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[True, False], [True, True]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.less_equal,
- r"""
- less_equal(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.le`.
- """,
- )
- add_docstr(
- torch.lerp,
- r"""
- lerp(input, end, weight, *, out=None) -> Tensor
- Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
- on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
- .. math::
- \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
- """
- + r"""
- The shapes of :attr:`start` and :attr:`end` must be
- :ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
- the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
- Args:
- input (Tensor): the tensor with the starting points
- end (Tensor): the tensor with the ending points
- weight (float or tensor): the weight for the interpolation formula
- Keyword args:
- {out}
- Example::
- >>> start = torch.arange(1., 5.)
- >>> end = torch.empty(4).fill_(10)
- >>> start
- tensor([ 1., 2., 3., 4.])
- >>> end
- tensor([ 10., 10., 10., 10.])
- >>> torch.lerp(start, end, 0.5)
- tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
- >>> torch.lerp(start, end, torch.full_like(start, 0.5))
- tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.lgamma,
- r"""
- lgamma(input, *, out=None) -> Tensor
- Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
- .. math::
- \text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
- """
- + """
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.arange(0.5, 2, 0.5)
- >>> torch.lgamma(a)
- tensor([ 0.5724, 0.0000, -0.1208])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.linspace,
- r"""
- linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
- .. math::
- (\text{start},
- \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \ldots,
- \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \text{end})
- """
- + """
- From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
- Args:
- start (float): the starting value for the set of points
- end (float): the ending value for the set of points
- steps (int): size of the constructed tensor
- Keyword arguments:
- {out}
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.linspace(3, 10, steps=5)
- tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
- >>> torch.linspace(-10, 10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=1)
- tensor([-10.])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.log,
- r"""
- log(input, *, out=None) -> Tensor
- Returns a new tensor with the natural logarithm of the elements
- of :attr:`input`.
- .. math::
- y_{i} = \log_{e} (x_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.rand(5) * 5
- >>> a
- tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
- >>> torch.log(a)
- tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.log10,
- r"""
- log10(input, *, out=None) -> Tensor
- Returns a new tensor with the logarithm to the base 10 of the elements
- of :attr:`input`.
- .. math::
- y_{i} = \log_{10} (x_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.rand(5)
- >>> a
- tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
- >>> torch.log10(a)
- tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.log1p,
- r"""
- log1p(input, *, out=None) -> Tensor
- Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
- .. math::
- y_i = \log_{e} (x_i + 1)
- """
- + r"""
- .. note:: This function is more accurate than :func:`torch.log` for small
- values of :attr:`input`.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(5)
- >>> a
- tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
- >>> torch.log1p(a)
- tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.log2,
- r"""
- log2(input, *, out=None) -> Tensor
- Returns a new tensor with the logarithm to the base 2 of the elements
- of :attr:`input`.
- .. math::
- y_{i} = \log_{2} (x_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.rand(5)
- >>> a
- tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
- >>> torch.log2(a)
- tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logaddexp,
- r"""
- logaddexp(input, other, *, out=None) -> Tensor
- Logarithm of the sum of exponentiations of the inputs.
- Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
- in statistics where the calculated probabilities of events may be so small as to
- exceed the range of normal floating point numbers. In such cases the logarithm
- of the calculated probability is stored. This function allows adding
- probabilities stored in such a fashion.
- This op should not be confused with :func:`torch.logsumexp`, which performs a
- reduction over a single tensor.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword arguments:
- {out}
- Example::
- >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
- tensor([-0.3069, -0.6867, -0.8731])
- >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
- tensor([-1., -2., -3.])
- >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
- tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logaddexp2,
- r"""
- logaddexp2(input, other, *, out=None) -> Tensor
- Logarithm of the sum of exponentiations of the inputs in base-2.
- Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
- :func:`torch.logaddexp` for more details.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword arguments:
- {out}
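- Example::
- >>> # illustrative values: log2(2**1 + 2**1) = 2, and negligible terms vanish
- >>> torch.logaddexp2(torch.tensor([1.0]), torch.tensor([1.0]))
- tensor([2.])
- >>> torch.logaddexp2(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
- tensor([-1., -2., -3.])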
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.xlogy,
- r"""
- xlogy(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.special.xlogy`.
- """,
- )
- add_docstr(
- torch.logical_and,
- r"""
- logical_and(input, other, *, out=None) -> Tensor
- Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
- treated as ``True``.
- Args:
- {input}
- other (Tensor): the tensor to compute AND with
- Keyword args:
- {out}
- Example::
- >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
- tensor([ True, False, False])
- >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
- >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
- >>> torch.logical_and(a, b)
- tensor([False, False, True, False])
- >>> torch.logical_and(a.double(), b.double())
- tensor([False, False, True, False])
- >>> torch.logical_and(a.double(), b)
- tensor([False, False, True, False])
- >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
- tensor([False, False, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logical_not,
- r"""
- logical_not(input, *, out=None) -> Tensor
- Computes the element-wise logical NOT of the given input tensor. If the :attr:`out` tensor is not specified, the output
- tensor will have the bool dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros
- are treated as ``True``.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> torch.logical_not(torch.tensor([True, False]))
- tensor([False, True])
- >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
- tensor([ True, False, False])
- >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
- tensor([ True, False, False])
- >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
- tensor([1, 0, 0], dtype=torch.int16)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logical_or,
- r"""
- logical_or(input, other, *, out=None) -> Tensor
- Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
- treated as ``True``.
- Args:
- {input}
- other (Tensor): the tensor to compute OR with
- Keyword args:
- {out}
- Example::
- >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
- tensor([ True, False, True])
- >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
- >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
- >>> torch.logical_or(a, b)
- tensor([ True, True, True, False])
- >>> torch.logical_or(a.double(), b.double())
- tensor([ True, True, True, False])
- >>> torch.logical_or(a.double(), b)
- tensor([ True, True, True, False])
- >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
- tensor([ True, True, True, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logical_xor,
- r"""
- logical_xor(input, other, *, out=None) -> Tensor
- Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
- treated as ``True``.
- Args:
- {input}
- other (Tensor): the tensor to compute XOR with
- Keyword args:
- {out}
- Example::
- >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
- tensor([False, False, True])
- >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
- >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
- >>> torch.logical_xor(a, b)
- tensor([ True, True, False, False])
- >>> torch.logical_xor(a.double(), b.double())
- tensor([ True, True, False, False])
- >>> torch.logical_xor(a.double(), b)
- tensor([ True, True, False, False])
- >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
- tensor([ True, True, False, False])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logspace,
- """
- logspace(start, end, steps, base=10.0, *, \
- out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- """
- + r"""
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :math:`\text{base}^{\text{start}}` to
- :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
- with base :attr:`base`. That is, the values are:
- .. math::
- (\text{base}^{\text{start}},
- \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \ldots,
- \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \text{base}^{\text{end}})
- """
- + """
- Since PyTorch 1.11, logspace requires the :attr:`steps` argument. Use ``steps=100`` to restore the previous behavior.
- Args:
- start (float): the starting value for the set of points
- end (float): the ending value for the set of points
- steps (int): size of the constructed tensor
- base (float, optional): base of the logarithm function. Default: ``10.0``.
- Keyword arguments:
- {out}
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.logspace(start=-10, end=10, steps=5)
- tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
- >>> torch.logspace(start=0.1, end=1.0, steps=5)
- tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
- >>> torch.logspace(start=0.1, end=1.0, steps=1)
- tensor([1.2589])
- >>> torch.logspace(start=2, end=2, steps=1, base=2)
- tensor([4.])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.logsumexp,
- r"""
- logsumexp(input, dim, keepdim=False, *, out=None) -> Tensor
- Returns the log of summed exponentials of each row of the :attr:`input`
- tensor in the given dimension :attr:`dim`. The computation is numerically
- stabilized.
- For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
- .. math::
- \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- {keepdim}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(3, 3)
- >>> torch.logsumexp(a, 1)
- tensor([1.4907, 1.0593, 1.5696])
- >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
- tensor(1.6859e-07)
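- >>> # Added illustration of the numerical stabilization: the naive
- >>> # log(sum(exp(x))) overflows here, while logsumexp returns 1000 + log(2):
- >>> torch.logsumexp(torch.tensor([[1000.0, 1000.0]]), 1)
- tensor([1000.6931])
- >>> torch.log(torch.sum(torch.exp(torch.tensor([[1000.0, 1000.0]])), 1))
- tensor([inf])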
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.lt,
- r"""
- lt(input, other, *, out=None) -> Tensor
- Computes :math:`\text{input} < \text{other}` element-wise.
- """
- + r"""
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
- Keyword args:
- {out}
- Returns:
- A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
- Example::
- >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, False], [True, False]])
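- >>> # The second argument may also be a number, broadcast against the input
- >>> # (added illustration):
- >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), 3)
- tensor([[ True, True], [False, False]])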
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.lu_unpack,
- r"""
- lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
- Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
- .. seealso::
- :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
- than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
- Args:
- LU_data (Tensor): the packed LU factorization data
- LU_pivots (Tensor): the packed LU factorization pivots
- unpack_data (bool): flag indicating if the data should be unpacked.
- If ``False``, then the returned ``L`` and ``U`` are empty tensors.
- Default: ``True``
- unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
- If ``False``, then the returned ``P`` is an empty tensor.
- Default: ``True``
- Keyword args:
- out (tuple, optional): output tuple of three tensors. Ignored if `None`.
- Returns:
- A namedtuple ``(P, L, U)``
- Examples::
- >>> A = torch.randn(2, 3, 3)
- >>> LU, pivots = torch.linalg.lu_factor(A)
- >>> P, L, U = torch.lu_unpack(LU, pivots)
- >>> # We can recover A from the factorization
- >>> A_ = P @ L @ U
- >>> torch.allclose(A, A_)
- True
- >>> # LU factorization of a rectangular matrix:
- >>> A = torch.randn(2, 3, 2)
- >>> LU, pivots = torch.linalg.lu_factor(A)
- >>> P, L, U = torch.lu_unpack(LU, pivots)
- >>> # P, L, U are the same as returned by linalg.lu
- >>> P_, L_, U_ = torch.linalg.lu(A)
- >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
- True
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.less,
- r"""
- less(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.lt`.
- """,
- )
- add_docstr(
- torch.lu_solve,
- r"""
- lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
- Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
- LU factorization of A from :func:`~linalg.lu_factor`.
- This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
- .. warning::
- :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
- :func:`torch.lu_solve` will be removed in a future PyTorch release.
- ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
- .. code:: python
- X = linalg.lu_solve(LU, pivots, B)
- Arguments:
- b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
- is zero or more batch dimensions.
- LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
- where :math:`*` is zero or more batch dimensions.
- LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
- where :math:`*` is zero or more batch dimensions.
- The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
- :attr:`LU_data`.
- Keyword args:
- {out}
- Example::
- >>> A = torch.randn(2, 3, 3)
- >>> b = torch.randn(2, 3, 1)
- >>> LU, pivots = torch.linalg.lu_factor(A)
- >>> x = torch.lu_solve(b, LU, pivots)
- >>> torch.dist(A @ x, b)
- tensor(1.00000e-07 *
- 2.8312)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.masked_select,
- r"""
- masked_select(input, mask, *, out=None) -> Tensor
- Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
- the boolean mask :attr:`mask`, which is a `BoolTensor`.
- The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
- to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
- .. note:: The returned tensor does **not** use the same storage
- as the original tensor.
- Args:
- {input}
- mask (BoolTensor): the tensor containing the binary mask to index with
- Keyword args:
- {out}
- Example::
- >>> x = torch.randn(3, 4)
- >>> x
- tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
- [-1.2035, 1.2252, 0.5002, 0.6248],
- [ 0.1307, -2.0608, 0.1244, 2.0139]])
- >>> mask = x.ge(0.5)
- >>> mask
- tensor([[False, False, False, False],
- [False, True, True, True],
- [False, False, False, True]])
- >>> torch.masked_select(x, mask)
- tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.matrix_power,
- r"""
- matrix_power(input, n, *, out=None) -> Tensor
- Alias for :func:`torch.linalg.matrix_power`
- """,
- )
- add_docstr(
- torch.matrix_exp,
- r"""
- matrix_exp(A) -> Tensor
- Alias for :func:`torch.linalg.matrix_exp`.
- """,
- )
- add_docstr(
- torch.max,
- r"""
- max(input) -> Tensor
- Returns the maximum value of all elements in the ``input`` tensor.
- .. warning::
- This function produces deterministic (sub)gradients unlike ``max(dim=0)``
- Args:
- {input}
- Example::
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6763, 0.7445, -2.2369]])
- >>> torch.max(a)
- tensor(0.7445)
- .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
- Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, and ``indices`` is the index location of each maximum value found
- (argmax).
- If ``keepdim`` is ``True``, the output tensors are of the same size
- as ``input`` except in the dimension ``dim`` where they are of size 1.
- Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than ``input``.
- .. note:: If there are multiple maximal values in a reduced row then
- the indices of the first maximal value are returned.
- Args:
- {input}
- {dim}
- {keepdim} Default: ``False``.
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (max, max_indices)
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
- [ 1.1949, -1.1127, -2.2379, -0.6702],
- [ 1.5717, -0.9207, 0.1297, -1.8768],
- [-0.6172, 1.0036, -0.6060, -0.2432]])
- >>> torch.max(a, 1)
- torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
- .. function:: max(input, other, *, out=None) -> Tensor
- :noindex:
- See :func:`torch.maximum`.
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.maximum,
- r"""
- maximum(input, other, *, out=None) -> Tensor
- Computes the element-wise maximum of :attr:`input` and :attr:`other`.
- .. note::
- If one of the elements being compared is a NaN, then that element is returned.
- :func:`maximum` is not supported for tensors with complex dtypes.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor((1, 2, -1))
- >>> b = torch.tensor((3, 0, 4))
- >>> torch.maximum(a, b)
- tensor([3, 2, 4])
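- >>> # NaNs are propagated (added illustration; contrast with :func:`torch.fmax`):
- >>> torch.maximum(torch.tensor([1.0, float('nan')]), torch.tensor([3.0, 2.0]))
- tensor([3., nan])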
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.fmax,
- r"""
- fmax(input, other, *, out=None) -> Tensor
- Computes the element-wise maximum of :attr:`input` and :attr:`other`.
- This is like :func:`torch.maximum` except it handles NaNs differently:
- if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
- Only if both elements are NaN is NaN propagated.
- This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
- >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
- >>> torch.fmax(a, b)
- tensor([9.7000, 0.5000, 3.1000, nan])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.amax,
- r"""
- amax(input, dim, keepdim=False, *, out=None) -> Tensor
- Returns the maximum value of each slice of the :attr:`input` tensor in the given
- dimension(s) :attr:`dim`.
- .. note::
- The difference between ``max``/``min`` and ``amax``/``amin`` is:
- - ``amax``/``amin`` supports reducing on multiple dimensions,
- - ``amax``/``amin`` does not return indices,
- - ``amax``/``amin`` evenly distributes gradient between equal values,
- while ``max(dim)``/``min(dim)`` propagates gradient only to a single
- index in the source tensor.
- {keepdim_details}
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
- [-0.7158, 1.1775, 2.0992, 0.4817],
- [-0.0053, 0.0164, -1.3738, -0.0507],
- [ 1.9700, 1.1106, -1.0318, -1.0816]])
- >>> torch.amax(a, 1)
- tensor([1.4878, 2.0992, 0.0164, 1.9700])
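- >>> # Multiple dimensions can be reduced at once (added illustration):
- >>> torch.amax(a, dim=(0, 1))
- tensor(2.0992)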
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.argmax,
- r"""
- argmax(input) -> LongTensor
- Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
- This is the second value returned by :meth:`torch.max`. See its
- documentation for the exact semantics of this method.
- .. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
- Args:
- {input}
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
- [-0.7401, -0.8805, -0.3402, -1.1936],
- [ 0.4907, -1.3948, -1.0691, -0.3132],
- [-1.6092, 0.5419, -0.2993, 0.3195]])
- >>> torch.argmax(a)
- tensor(0)
- .. function:: argmax(input, dim, keepdim=False) -> LongTensor
- :noindex:
- Returns the indices of the maximum values of a tensor across a dimension.
- This is the second value returned by :meth:`torch.max`. See its
- documentation for the exact semantics of this method.
- Args:
- {input}
- {dim} If ``None``, the argmax of the flattened input is returned.
- {keepdim} Ignored if ``dim=None``.
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
- [-0.7401, -0.8805, -0.3402, -1.1936],
- [ 0.4907, -1.3948, -1.0691, -0.3132],
- [-1.6092, 0.5419, -0.2993, 0.3195]])
- >>> torch.argmax(a, dim=1)
- tensor([ 0, 2, 0, 1])
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.argwhere,
- r"""
- argwhere(input) -> Tensor
- Returns a tensor containing the indices of all non-zero elements of
- :attr:`input`. Each row in the result contains the indices of a non-zero
- element in :attr:`input`. The result is sorted lexicographically, with
- the last index changing the fastest (C-style).
- If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
- :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
- .. note::
- This function is similar to NumPy's `argwhere`.
- When :attr:`input` is on CUDA, this function causes host-device synchronization.
- Args:
- {input}
- Example::
- >>> t = torch.tensor([1, 0, 1])
- >>> torch.argwhere(t)
- tensor([[0],
- [2]])
- >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
- >>> torch.argwhere(t)
- tensor([[0, 0],
- [0, 2],
- [1, 1],
- [1, 2]])
- """,
- )
- add_docstr(
- torch.mean,
- r"""
- mean(input, *, dtype=None) -> Tensor
- Returns the mean value of all elements in the :attr:`input` tensor.
- Args:
- {input}
- Keyword args:
- {dtype}
- Example::
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.2294, -0.5481, 1.3288]])
- >>> torch.mean(a)
- tensor(0.3367)
- .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
- :noindex:
- Returns the mean value of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
- {keepdim_details}
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- {dtype}
- {out}
- .. seealso::
- :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
- [-0.9644, 1.0131, -0.6549, -1.4279],
- [-0.2951, -1.3350, -0.7694, 0.5600],
- [ 1.0842, -0.9580, 0.3623, 0.2343]])
- >>> torch.mean(a, 1)
- tensor([-0.0163, -0.5085, -0.4599, 0.1807])
- >>> torch.mean(a, 1, True)
- tensor([[-0.0163],
- [-0.5085],
- [-0.4599],
- [ 0.1807]])
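- >>> # dim may be a list of dimensions, reducing over all of them
- >>> # (added illustration; the value follows from the entries of ``a`` above):
- >>> torch.mean(a, dim=[0, 1])
- tensor(-0.2010)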
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.nanmean,
- r"""
- nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
- Computes the mean of all `non-NaN` elements along the specified dimensions.
- This function is identical to :func:`torch.mean` when there are no `NaN` values
- in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
- propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
- `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- {keepdim}
- Keyword args:
- {dtype}
- {out}
- .. seealso::
- :func:`torch.mean` computes the mean value, propagating `NaN`.
- Example::
- >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
- >>> x.mean()
- tensor(nan)
- >>> x.nanmean()
- tensor(1.8000)
- >>> x.mean(dim=0)
- tensor([ nan, 1.5000, 2.5000])
- >>> x.nanmean(dim=0)
- tensor([1.0000, 1.5000, 2.5000])
- # If all elements in the reduced dimensions are NaN then the result is NaN
- >>> torch.tensor([torch.nan]).nanmean()
- tensor(nan)
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.median,
- r"""
- median(input) -> Tensor
- Returns the median of the values in :attr:`input`.
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements. In this case the lower of the two medians is returned. To
- compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
- .. warning::
- This function produces deterministic (sub)gradients unlike ``median(dim=0)``
- Args:
- {input}
- Example::
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 1.5219, -1.5212, 0.2202]])
- >>> torch.median(a)
- tensor(0.2202)
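- >>> # Added illustration of the note above: for an even number of elements,
- >>> # the lower of the two medians is returned, while quantile with q=0.5
- >>> # averages them:
- >>> b = torch.tensor([1., 2., 3., 4.])
- >>> torch.median(b)
- tensor(2.)
- >>> torch.quantile(b, 0.5)
- tensor(2.5000)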
- .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
- If :attr:`keepdim` is ``True``, the output tensors are of the same size
- as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements in the dimension :attr:`dim`. In this case the lower of the
- two medians is returned. To compute the mean of both medians in
- :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
- .. warning::
- ``indices`` does not necessarily contain the first occurrence of each
- median value found, unless it is unique.
- The exact implementation details are device-specific.
- Do not expect the same result when run on CPU and GPU in general.
- For the same reason do not expect the gradients to be deterministic.
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
- Example::
- >>> a = torch.randn(4, 5)
- >>> a
- tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
- [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
- [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
- [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
- >>> torch.median(a, 1)
- torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.nanmedian,
- r"""
- nanmedian(input) -> Tensor
- Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
- When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
- while this function will return the median of the non-``NaN`` elements in :attr:`input`.
- If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
- Args:
- {input}
- Example::
- >>> a = torch.tensor([1, float('nan'), 3, 2])
- >>> a.median()
- tensor(nan)
- >>> a.nanmedian()
- tensor(2.)
- .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
- found in the dimension :attr:`dim`.
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
- one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
- median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
- Example::
- >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
- >>> a
- tensor([[2., 3., 1.],
- [nan, 1., nan]])
- >>> a.median(0)
- torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
- >>> a.nanmedian(0)
- torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.quantile,
- r"""
- quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
- Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
- To compute the quantile, we map q in [0, 1] to the range of indices [0, n - 1] to find the location
- of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
- indices ``i`` and ``j`` in the sorted order, result is computed according to the given
- :attr:`interpolation` method as follows:
- - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- - ``lower``: ``a``.
- - ``higher``: ``b``.
- - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- - ``midpoint``: ``(a + b) / 2``.
- If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
- equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
- .. note::
- By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
- Args:
- {input}
- q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
- {dim}
- {keepdim}
- Keyword arguments:
- interpolation (str): interpolation method to use when the desired quantile lies between two data points.
- Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
- Default is ``linear``.
- {out}
- Example::
- >>> a = torch.randn(2, 3)
- >>> a
- tensor([[ 0.0795, -1.2117, 0.9765],
- [ 1.1707, 0.6706, 0.4884]])
- >>> q = torch.tensor([0.25, 0.5, 0.75])
- >>> torch.quantile(a, q, dim=1, keepdim=True)
- tensor([[[-0.5661],
- [ 0.5795]],
- [[ 0.0795],
- [ 0.6706]],
- [[ 0.5280],
- [ 0.9206]]])
- >>> torch.quantile(a, q, dim=1, keepdim=True).shape
- torch.Size([3, 2, 1])
- >>> a = torch.arange(4.)
- >>> a
- tensor([0., 1., 2., 3.])
- >>> torch.quantile(a, 0.6, interpolation='linear')
- tensor(1.8000)
- >>> torch.quantile(a, 0.6, interpolation='lower')
- tensor(1.)
- >>> torch.quantile(a, 0.6, interpolation='higher')
- tensor(2.)
- >>> torch.quantile(a, 0.6, interpolation='midpoint')
- tensor(1.5000)
- >>> torch.quantile(a, 0.6, interpolation='nearest')
- tensor(2.)
- >>> torch.quantile(a, 0.4, interpolation='nearest')
- tensor(1.)
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.nanquantile,
- r"""
- nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
- This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
- computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
- not exist. If all values in a reduced row are ``NaN`` then the quantiles for
- that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
- Args:
- {input}
- q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
- {dim}
- {keepdim}
- Keyword arguments:
- interpolation (str): interpolation method to use when the desired quantile lies between two data points.
- Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
- Default is ``linear``.
- {out}
- Example::
- >>> t = torch.tensor([float('nan'), 1, 2])
- >>> t.quantile(0.5)
- tensor(nan)
- >>> t.nanquantile(0.5)
- tensor(1.5000)
- >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
- >>> t
- tensor([[nan, nan],
- [1., 2.]])
- >>> t.nanquantile(0.5, dim=0)
- tensor([1., 2.])
- >>> t.nanquantile(0.5, dim=1)
- tensor([ nan, 1.5000])
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.min,
- r"""
- min(input) -> Tensor
- Returns the minimum value of all elements in the :attr:`input` tensor.
- .. warning::
- This function produces deterministic (sub)gradients unlike ``min(dim=0)``
- Args:
- {input}
- Example::
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6750, 1.0857, 1.7197]])
- >>> torch.min(a)
- tensor(0.6750)
- .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
- Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, and ``indices`` is the index location of each minimum value found
- (argmin).
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
- .. note:: If there are multiple minimal values in a reduced row then
- the indices of the first minimal value are returned.
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- out (tuple, optional): the tuple of two output tensors (min, min_indices)
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
- [-1.4644, -0.2635, -0.3651, 0.6134],
- [ 0.2457, 0.0384, 1.0128, 0.7015],
- [-0.1153, 2.9849, 2.1458, 0.5788]])
- >>> torch.min(a, 1)
- torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
- .. function:: min(input, other, *, out=None) -> Tensor
- :noindex:
- See :func:`torch.minimum`.
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.minimum,
- r"""
- minimum(input, other, *, out=None) -> Tensor
- Computes the element-wise minimum of :attr:`input` and :attr:`other`.
- .. note::
- If one of the elements being compared is a NaN, then that element is returned.
- :func:`minimum` is not supported for tensors with complex dtypes.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor((1, 2, -1))
- >>> b = torch.tensor((3, 0, 4))
- >>> torch.minimum(a, b)
- tensor([1, 0, -1])
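- >>> # NaNs are propagated (added illustration; contrast with :func:`torch.fmin`):
- >>> torch.minimum(torch.tensor([1.0, float('nan')]), torch.tensor([3.0, 2.0]))
- tensor([1., nan])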
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.fmin,
- r"""
- fmin(input, other, *, out=None) -> Tensor
- Computes the element-wise minimum of :attr:`input` and :attr:`other`.
- This is like :func:`torch.minimum` except it handles NaNs differently:
- if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
- Only if both elements are NaN is NaN propagated.
- This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
- Args:
- {input}
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
- >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
- >>> torch.fmin(a, b)
- tensor([-9.3000, 0.1000, 2.1000, nan])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.amin,
- r"""
- amin(input, dim, keepdim=False, *, out=None) -> Tensor
- Returns the minimum value of each slice of the :attr:`input` tensor in the given
- dimension(s) :attr:`dim`.
- .. note::
- The difference between ``max``/``min`` and ``amax``/``amin`` is:
- - ``amax``/``amin`` supports reducing on multiple dimensions,
- - ``amax``/``amin`` does not return indices,
- - ``amax``/``amin`` evenly distributes gradient between equal values,
- while ``max(dim)``/``min(dim)`` propagates gradient only to a single
- index in the source tensor.
- {keepdim_details}
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
- [-0.5744, 1.2980, 1.8397, -0.2713],
- [ 0.9128, 0.9214, -1.7268, -0.2995],
- [ 0.9023, 0.4853, 0.9075, -1.6165]])
- >>> torch.amin(a, 1)
- tensor([-1.3312, -0.5744, -1.7268, -1.6165])
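- >>> # Multiple dimensions can be reduced at once (added illustration):
- >>> torch.amin(a, dim=(0, 1))
- tensor(-1.7268)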
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.aminmax,
- r"""
- aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
- Computes the minimum and maximum values of the :attr:`input` tensor.
- Args:
- input (Tensor):
- The input tensor
- Keyword Args:
- dim (Optional[int]):
- The dimension along which to compute the values. If `None`,
- computes the values over the entire :attr:`input` tensor.
- Default is `None`.
- keepdim (bool):
- If `True`, the reduced dimensions will be kept in the output
- tensor as dimensions with size 1 for broadcasting, otherwise
- they will be removed, as if calling :func:`torch.squeeze`.
- Default is `False`.
- out (Optional[Tuple[Tensor, Tensor]]):
- Optional tensors on which to write the result. Must have the same
- shape and dtype as the expected output.
- Default is `None`.
- Returns:
- A named tuple `(min, max)` containing the minimum and maximum values.
- Raises:
- RuntimeError
- If any of the dimensions to compute the values over has size 0.
- .. note::
- NaN values are propagated to the output if at least one value is NaN.
- .. seealso::
- :func:`torch.amin` computes just the minimum value
- :func:`torch.amax` computes just the maximum value
- Example::
- >>> torch.aminmax(torch.tensor([1, -3, 5]))
- torch.return_types.aminmax(
- min=tensor(-3),
- max=tensor(5))
- >>> # aminmax propagates NaNs
- >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
- torch.return_types.aminmax(
- min=tensor(nan),
- max=tensor(nan))
- >>> t = torch.arange(10).view(2, 5)
- >>> t
- tensor([[0, 1, 2, 3, 4],
- [5, 6, 7, 8, 9]])
- >>> t.aminmax(dim=0, keepdim=True)
- torch.return_types.aminmax(
- min=tensor([[0, 1, 2, 3, 4]]),
- max=tensor([[5, 6, 7, 8, 9]]))
- """,
- )
- add_docstr(
- torch.argmin,
- r"""
- argmin(input, dim=None, keepdim=False) -> LongTensor
- Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.
- This is the second value returned by :meth:`torch.min`. See its
- documentation for the exact semantics of this method.
- .. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
- Args:
- {input}
- {dim} If ``None``, the argmin of the flattened input is returned.
- {keepdim}.
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
- [ 1.0100, -1.1975, -0.0102, -0.4732],
- [-0.9240, 0.1207, -0.7506, -1.0213],
- [ 1.7809, -1.2960, 0.9384, 0.1438]])
- >>> torch.argmin(a)
- tensor(13)
- >>> torch.argmin(a, dim=1)
- tensor([ 2, 1, 3, 1])
- >>> torch.argmin(a, dim=1, keepdim=True)
- tensor([[2],
- [1],
- [3],
- [1]])
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.mm,
- r"""
- mm(input, mat2, *, out=None) -> Tensor
- Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
- If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
- For broadcasting matrix products, see :func:`torch.matmul`.
- Supports strided and sparse 2-D tensors as inputs, autograd with
- respect to strided inputs.
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
- If :attr:`out` is provided, its layout will be used. Otherwise, the result
- layout will be deduced from that of :attr:`input`.
- {sparse_beta_warning}
- {tf32_note}
- {rocm_fp16_note}
- Args:
- input (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
- Keyword args:
- {out}
- Example::
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.mm(mat1, mat2)
- tensor([[ 0.4851, 0.5037, -0.3633],
- [-0.0760, -3.6705, 2.4784]])
- """.format(
- **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
- ),
- )
- add_docstr(
- torch.hspmm,
- r"""
- hspmm(mat1, mat2, *, out=None) -> Tensor
- Performs a matrix multiplication of a :ref:`sparse COO matrix
- <sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
- result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
- <sparse-hybrid-coo-docs>`.
- Args:
- mat1 (Tensor): the first sparse matrix to be matrix multiplied
- mat2 (Tensor): the second strided matrix to be matrix multiplied
- Keyword args:
- {out}
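- Example::
- >>> # A minimal added sketch: multiply a sparse COO identity by a strided matrix;
- >>> # the hybrid COO result is densified only to display it:
- >>> mat1 = torch.eye(2).to_sparse()
- >>> mat2 = torch.ones(2, 3)
- >>> torch.hspmm(mat1, mat2).to_dense()
- tensor([[1., 1., 1.],
- [1., 1., 1.]])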
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.matmul,
- r"""
- matmul(input, other, *, out=None) -> Tensor
- Matrix product of two tensors.
- The behavior depends on the dimensionality of the tensors as follows:
- - If both tensors are 1-dimensional, the dot product (scalar) is returned.
- - If both arguments are 2-dimensional, the matrix-matrix product is returned.
- - If the first argument is 1-dimensional and the second argument is 2-dimensional,
- a 1 is prepended to its dimension for the purpose of the matrix multiply.
- After the matrix multiply, the prepended dimension is removed.
- - If the first argument is 2-dimensional and the second argument is 1-dimensional,
- the matrix-vector product is returned.
- - If both arguments are at least 1-dimensional and at least one argument is
- N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
- argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
- batched matrix multiply and removed after. If the second argument is 1-dimensional, a
- 1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
- The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
- must be broadcastable). For example, if :attr:`input` is a
- :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
- tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
- Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
- are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
- :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
- tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
- matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular, the
- matrix-matrix product (both arguments 2-dimensional) supports sparse arguments with the same restrictions
- as :func:`torch.mm`.
- {sparse_beta_warning}
- {tf32_note}
- {rocm_fp16_note}
- .. note::
- The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
- Arguments:
- input (Tensor): the first tensor to be multiplied
- other (Tensor): the second tensor to be multiplied
- Keyword args:
- {out}
- Example::
- >>> # vector x vector
- >>> tensor1 = torch.randn(3)
- >>> tensor2 = torch.randn(3)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([])
- >>> # matrix x vector
- >>> tensor1 = torch.randn(3, 4)
- >>> tensor2 = torch.randn(4)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([3])
- >>> # batched matrix x broadcasted vector
- >>> tensor1 = torch.randn(10, 3, 4)
- >>> tensor2 = torch.randn(4)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 3])
- >>> # batched matrix x batched matrix
- >>> tensor1 = torch.randn(10, 3, 4)
- >>> tensor2 = torch.randn(10, 4, 5)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 3, 5])
- >>> # batched matrix x broadcasted matrix
- >>> tensor1 = torch.randn(10, 3, 4)
- >>> tensor2 = torch.randn(4, 5)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 3, 5])
- """.format(
- **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
- ),
- )
- add_docstr(
- torch.mode,
- r"""
- mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, i.e. a value which appears most often
- in that row, and ``indices`` is the index location of each mode value found.
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than :attr:`input`.
- .. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
- Example::
- >>> a = torch.randint(10, (5,))
- >>> a
- tensor([6, 5, 1, 0, 2])
- >>> b = a + (torch.randn(50, 1) * 5).long()
- >>> torch.mode(b, 0)
- torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.mul,
- r"""
- mul(input, other, *, out=None) -> Tensor
- Multiplies :attr:`input` by :attr:`other`.
- .. math::
- \text{out}_i = \text{input}_i \times \text{other}_i
- """
- + r"""
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
- Args:
- {input}
- other (Tensor or Number): the tensor or number to multiply input by.
- Keyword args:
- {out}
- Examples::
- >>> a = torch.randn(3)
- >>> a
- tensor([ 0.2015, -0.4255, 2.6087])
- >>> torch.mul(a, 100)
- tensor([ 20.1494, -42.5491, 260.8663])
- >>> b = torch.randn(4, 1)
- >>> b
- tensor([[ 1.1207],
- [-0.3137],
- [ 0.0700],
- [ 0.8378]])
- >>> c = torch.randn(1, 4)
- >>> c
- tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
- >>> torch.mul(b, c)
- tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
- [-0.1614, -0.0382, 0.1645, -0.7021],
- [ 0.0360, 0.0085, -0.0367, 0.1567],
- [ 0.4312, 0.1019, -0.4394, 1.8753]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.multiply,
- r"""
- multiply(input, other, *, out=None)
- Alias for :func:`torch.mul`.
- """,
- )
- add_docstr(
- torch.multinomial,
- r"""
- multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
- Returns a tensor where each row contains :attr:`num_samples` indices sampled
- from the multinomial probability distribution located in the corresponding row
- of tensor :attr:`input`.
- .. note::
- The rows of :attr:`input` do not need to sum to one (in which case we use
- the values as weights), but must be non-negative, finite and have
- a non-zero sum.
- Indices are ordered from left to right according to when each was sampled
- (first samples are placed in first column).
- If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
- If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
- :math:`(m \times \text{{num\_samples}})`.
- If replacement is ``True``, samples are drawn with replacement.
- If not, they are drawn without replacement, which means that when a
- sample index is drawn for a row, it cannot be drawn again for that row.
- .. note::
- When drawn without replacement, :attr:`num_samples` must be lower than the
- number of non-zero elements in :attr:`input` (or the minimum number of non-zero
- elements in each row of :attr:`input` if it is a matrix).
- Args:
- input (Tensor): the input tensor containing probabilities
- num_samples (int): number of samples to draw
- replacement (bool, optional): whether to draw with replacement or not
- Keyword args:
- {generator}
- {out}
- Example::
- >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
- >>> torch.multinomial(weights, 2)
- tensor([1, 2])
- >>> torch.multinomial(weights, 4) # ERROR!
- RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
- not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
- >>> torch.multinomial(weights, 4, replacement=True)
- tensor([ 2, 1, 1, 1])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.mv,
- r"""
- mv(input, vec, *, out=None) -> Tensor
- Performs a matrix-vector product of the matrix :attr:`input` and the vector
- :attr:`vec`.
- If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
- Args:
- input (Tensor): matrix to be multiplied
- vec (Tensor): vector to be multiplied
- Keyword args:
- {out}
- Example::
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.mv(mat, vec)
- tensor([ 1.0404, -0.6361])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.mvlgamma,
- r"""
- mvlgamma(input, p, *, out=None) -> Tensor
- Alias for :func:`torch.special.multigammaln`.
- """,
- )
- add_docstr(
- torch.movedim,
- r"""
- movedim(input, source, destination) -> Tensor
- Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
- to the position(s) in :attr:`destination`.
- Other dimensions of :attr:`input` that are not explicitly moved remain in
- their original order and appear at the positions not specified in :attr:`destination`.
- Args:
- {input}
- source (int or tuple of ints): Original positions of the dims to move. These must be unique.
- destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
- Examples::
- >>> t = torch.randn(3,2,1)
- >>> t
- tensor([[[-0.3362],
- [-0.8437]],
- [[-0.9627],
- [ 0.1727]],
- [[ 0.5173],
- [-0.1398]]])
- >>> torch.movedim(t, 1, 0).shape
- torch.Size([2, 3, 1])
- >>> torch.movedim(t, 1, 0)
- tensor([[[-0.3362],
- [-0.9627],
- [ 0.5173]],
- [[-0.8437],
- [ 0.1727],
- [-0.1398]]])
- >>> torch.movedim(t, (1, 2), (0, 1)).shape
- torch.Size([2, 1, 3])
- >>> torch.movedim(t, (1, 2), (0, 1))
- tensor([[[-0.3362, -0.9627, 0.5173]],
- [[-0.8437, 0.1727, -0.1398]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.moveaxis,
- r"""
- moveaxis(input, source, destination) -> Tensor
- Alias for :func:`torch.movedim`.
- This function is equivalent to NumPy's moveaxis function.
- Examples::
- >>> t = torch.randn(3,2,1)
- >>> t
- tensor([[[-0.3362],
- [-0.8437]],
- [[-0.9627],
- [ 0.1727]],
- [[ 0.5173],
- [-0.1398]]])
- >>> torch.moveaxis(t, 1, 0).shape
- torch.Size([2, 3, 1])
- >>> torch.moveaxis(t, 1, 0)
- tensor([[[-0.3362],
- [-0.9627],
- [ 0.5173]],
- [[-0.8437],
- [ 0.1727],
- [-0.1398]]])
- >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
- torch.Size([2, 1, 3])
- >>> torch.moveaxis(t, (1, 2), (0, 1))
- tensor([[[-0.3362, -0.9627, 0.5173]],
- [[-0.8437, 0.1727, -0.1398]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.swapdims,
- r"""
- swapdims(input, dim0, dim1) -> Tensor
- Alias for :func:`torch.transpose`.
- This function is equivalent to NumPy's swapaxes function.
- Examples::
- >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
- >>> x
- tensor([[[0, 1],
- [2, 3]],
- [[4, 5],
- [6, 7]]])
- >>> torch.swapdims(x, 0, 1)
- tensor([[[0, 1],
- [4, 5]],
- [[2, 3],
- [6, 7]]])
- >>> torch.swapdims(x, 0, 2)
- tensor([[[0, 4],
- [2, 6]],
- [[1, 5],
- [3, 7]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.swapaxes,
- r"""
- swapaxes(input, axis0, axis1) -> Tensor
- Alias for :func:`torch.transpose`.
- This function is equivalent to NumPy's swapaxes function.
- Examples::
- >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
- >>> x
- tensor([[[0, 1],
- [2, 3]],
- [[4, 5],
- [6, 7]]])
- >>> torch.swapaxes(x, 0, 1)
- tensor([[[0, 1],
- [4, 5]],
- [[2, 3],
- [6, 7]]])
- >>> torch.swapaxes(x, 0, 2)
- tensor([[[0, 4],
- [2, 6]],
- [[1, 5],
- [3, 7]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.narrow,
- r"""
- narrow(input, dim, start, length) -> Tensor
- Returns a new tensor that is a narrowed version of the :attr:`input` tensor. Along
- the dimension :attr:`dim`, it spans the elements from :attr:`start` up to, but not
- including, ``start + length``. The returned tensor and the :attr:`input` tensor
- share the same underlying storage.
- Args:
- input (Tensor): the tensor to narrow
- dim (int): the dimension along which to narrow
- start (int or Tensor): index of the element to start the narrowed dimension
- from. Can be negative, which means indexing from the end of `dim`. If
- `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
- length (int): length of the narrowed dimension, must be weakly positive
- Example::
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> torch.narrow(x, 0, 0, 2)
- tensor([[ 1, 2, 3],
- [ 4, 5, 6]])
- >>> torch.narrow(x, 1, 1, 2)
- tensor([[ 2, 3],
- [ 5, 6],
- [ 8, 9]])
- >>> torch.narrow(x, -1, torch.tensor(-1), 1)
- tensor([[3],
- [6],
- [9]])
- """,
- )
- add_docstr(
- torch.narrow_copy,
- r"""
- narrow_copy(input, dim, start, length, *, out=None) -> Tensor
- Same as :meth:`Tensor.narrow` except this returns a copy rather
- than shared storage. This is primarily for sparse tensors, which
- do not have a shared-storage narrow method.
- Args:
- input (Tensor): the tensor to narrow
- dim (int): the dimension along which to narrow
- start (int): index of the element to start the narrowed dimension from. Can
- be negative, which means indexing from the end of `dim`
- length (int): length of the narrowed dimension, must be weakly positive
- Keyword args:
- {out}
- Example::
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> torch.narrow_copy(x, 0, 0, 2)
- tensor([[ 1, 2, 3],
- [ 4, 5, 6]])
- >>> torch.narrow_copy(x, 1, 1, 2)
- tensor([[ 2, 3],
- [ 5, 6],
- [ 8, 9]])
- >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
- >>> torch.narrow_copy(s, 0, 0, 1)
- tensor(indices=tensor([[0, 0],
- [0, 1]]),
- values=tensor([[[0, 1],
- [2, 3]],
- [[4, 5],
- [6, 7]]]),
- size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)
- .. seealso::
- :func:`torch.narrow` for a non-copy variant
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.nan_to_num,
- r"""
- nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
- Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
- with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
- By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
- greatest finite value representable by :attr:`input`'s dtype, and negative infinity
- is replaced with the least finite value representable by :attr:`input`'s dtype.
- Args:
- {input}
- nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
- posinf (Number, optional): if a Number, the value to replace positive infinity values with.
- If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
- Default is None.
- neginf (Number, optional): if a Number, the value to replace negative infinity values with.
- If None, negative infinity values are replaced with the least finite value representable by :attr:`input`'s dtype.
- Default is None.
- Keyword args:
- {out}
- Example::
- >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
- >>> torch.nan_to_num(x)
- tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
- >>> torch.nan_to_num(x, nan=2.0)
- tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
- >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
- tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00])
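- >>> # neginf is replaced the same way (added illustration):
- >>> torch.nan_to_num(x, nan=2.0, posinf=1.0, neginf=-1.0)
- tensor([ 2.0000, 1.0000, -1.0000, 3.1400])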
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.ne,
- r"""
- ne(input, other, *, out=None) -> Tensor
- Computes :math:`\text{input} \neq \text{other}` element-wise.
- """
- + r"""
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
- Keyword args:
- {out}
- Returns:
- A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
- Example::
- >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, True], [True, False]])
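- >>> # Comparison against a number also works via broadcasting (added illustration):
- >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), 1)
- tensor([[False, True], [True, True]])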
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.not_equal,
- r"""
- not_equal(input, other, *, out=None) -> Tensor
- Alias for :func:`torch.ne`.
- """,
- )
- add_docstr(
- torch.neg,
- r"""
- neg(input, *, out=None) -> Tensor
- Returns a new tensor with the negative of the elements of :attr:`input`.
- .. math::
- \text{out} = -1 \times \text{input}
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(5)
- >>> a
- tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
- >>> torch.neg(a)
- tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.negative,
- r"""
- negative(input, *, out=None) -> Tensor
- Alias for :func:`torch.neg`
- """,
- )
- add_docstr(
- torch.nextafter,
- r"""
- nextafter(input, other, *, out=None) -> Tensor
- Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
- The shapes of ``input`` and ``other`` must be
- :ref:`broadcastable <broadcasting-semantics>`.
- Args:
- input (Tensor): the first input tensor
- other (Tensor): the second input tensor
- Keyword args:
- {out}
- Example::
- >>> eps = torch.finfo(torch.float32).eps
- >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
- tensor([True, True])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.nonzero,
- r"""
- nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
- .. note::
- :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
- 2-D tensor where each row is the index for a nonzero value.
- :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
- index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
- gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
- contains nonzero indices for a certain dimension.
- See below for more details on the two behaviors.
- When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
- host-device synchronization.
- **When** :attr:`as_tuple` **is** ``False`` **(default)**:
- Returns a tensor containing the indices of all non-zero elements of
- :attr:`input`. Each row in the result contains the indices of a non-zero
- element in :attr:`input`. The result is sorted lexicographically, with
- the last index changing the fastest (C-style).
- If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
- :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
- **When** :attr:`as_tuple` **is** ``True``:
- Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
- each containing the indices (in that dimension) of all non-zero elements of
- :attr:`input` .
- If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
- tensors of size :math:`z`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
- As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
- value, it is treated as a one-dimensional tensor with one element.
- Args:
- {input}
- Keyword args:
- out (LongTensor, optional): the output tensor containing indices
- Returns:
- LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
- tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
- each dimension, containing the indices of each nonzero element along that
- dimension.
- Example::
- >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
- tensor([[ 0],
- [ 1],
- [ 2],
- [ 4]])
- >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
- ... [0.0, 0.4, 0.0, 0.0],
- ... [0.0, 0.0, 1.2, 0.0],
- ... [0.0, 0.0, 0.0,-0.4]]))
- tensor([[ 0, 0],
- [ 1, 1],
- [ 2, 2],
- [ 3, 3]])
- >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
- (tensor([0, 1, 2, 4]),)
- >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
- ... [0.0, 0.4, 0.0, 0.0],
- ... [0.0, 0.0, 1.2, 0.0],
- ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
- (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
- >>> torch.nonzero(torch.tensor(5), as_tuple=True)
- (tensor([0]),)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.normal,
- r"""
- normal(mean, std, *, generator=None, out=None) -> Tensor
- Returns a tensor of random numbers drawn from separate normal distributions
- whose mean and standard deviation are given.
- The :attr:`mean` is a tensor with the mean of
- each output element's normal distribution.
- The :attr:`std` is a tensor with the standard deviation of
- each output element's normal distribution.
- The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
- total number of elements in each tensor needs to be the same.
- .. note:: When the shapes do not match, the shape of :attr:`mean`
- is used as the shape for the returned output tensor.
- .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
- its device with the CPU.
- Args:
- mean (Tensor): the tensor of per-element means
- std (Tensor): the tensor of per-element standard deviations
- Keyword args:
- {generator}
- {out}
- Example::
- >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
- tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
- 8.0505, 8.1408, 9.0563, 10.0566])
- .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
- :noindex:
- Similar to the function above, but the means are shared among all drawn
- elements.
- Args:
- mean (float, optional): the mean for all distributions
- std (Tensor): the tensor of per-element standard deviations
- Keyword args:
- {out}
- Example::
- >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
- tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
- .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
- :noindex:
- Similar to the function above, but the standard deviations are shared among
- all drawn elements.
- Args:
- mean (Tensor): the tensor of per-element means
- std (float, optional): the standard deviation for all distributions
- Keyword args:
- out (Tensor, optional): the output tensor
- Example::
- >>> torch.normal(mean=torch.arange(1., 6.))
- tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
- .. function:: normal(mean, std, size, *, out=None) -> Tensor
- :noindex:
- Similar to the function above, but the means and standard deviations are shared
- among all drawn elements. The resulting tensor has size given by :attr:`size`.
- Args:
- mean (float): the mean for all distributions
- std (float): the standard deviation for all distributions
- size (int...): a sequence of integers defining the shape of the output tensor.
- Keyword args:
- {out}
- Example::
- >>> torch.normal(2, 3, size=(1, 4))
- tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.numel,
- r"""
- numel(input) -> int
- Returns the total number of elements in the :attr:`input` tensor.
- Args:
- {input}
- Example::
- >>> a = torch.randn(1, 2, 3, 4, 5)
- >>> torch.numel(a)
- 120
- >>> a = torch.zeros(4,4)
- >>> torch.numel(a)
- 16
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.ones,
- r"""
- ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Returns a tensor filled with the scalar value `1`, with the shape defined
- by the variable argument :attr:`size`.
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
- Keyword arguments:
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.ones(2, 3)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
- >>> torch.ones(5)
- tensor([ 1., 1., 1., 1., 1.])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.ones_like,
- r"""
- ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
- Returns a tensor filled with the scalar value `1`, with the same size as
- :attr:`input`. ``torch.ones_like(input)`` is equivalent to
- ``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
- .. warning::
- As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
- the old ``torch.ones_like(input, out=output)`` is equivalent to
- ``torch.ones(input.size(), out=output)``.
- Args:
- {input}
- Keyword arguments:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
- Example::
- >>> input = torch.empty(2, 3)
- >>> torch.ones_like(input)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.orgqr,
- r"""
- orgqr(input, tau) -> Tensor
- Alias for :func:`torch.linalg.householder_product`.
- """,
- )
- add_docstr(
- torch.ormqr,
- r"""
- ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
- Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
- Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
- where `Q` is represented using Householder reflectors `(input, tau)`.
- See `Representation of Orthogonal or Unitary Matrices`_ for further details.
- If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
- When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
- It has size :math:`n \times n` otherwise.
- If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
- Supports inputs of float, double, cfloat and cdouble dtypes.
- Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
- .. seealso::
- :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
- from the QR decomposition.
- .. note::
- This function supports backward but it is only fast when ``(input, tau)`` do not require gradients
- and/or ``tau.size(-1)`` is very small.
- Args:
- input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
- and `mn` equals `m` or `n` depending on :attr:`left`.
- tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
- other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
- left (bool): controls the order of multiplication.
- transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
- Keyword args:
- out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
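- A minimal sketch (``h``, ``tau``, and ``C`` are illustrative names; the inputs
- are random, so only the shapes and the consistency check are meaningful)::
- >>> A = torch.randn(3, 3)
- >>> h, tau = torch.geqrf(A)  # Householder representation (input, tau) of Q
- >>> C = torch.randn(3, 4)
- >>> out = torch.ormqr(h, tau, C)  # computes Q @ C without materializing Q
- >>> out.shape
- torch.Size([3, 4])
- >>> Q = torch.linalg.householder_product(h, tau)  # form Q explicitly to verify
- >>> torch.allclose(out, Q @ C)
- True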
- .. _Representation of Orthogonal or Unitary Matrices:
- https://www.netlib.org/lapack/lug/node128.html
- """,
- )
- add_docstr(
- torch.permute,
- r"""
- permute(input, dims) -> Tensor
- Returns a view of the original tensor :attr:`input` with its dimensions permuted.
- Args:
- {input}
- dims (tuple of int): The desired ordering of dimensions
- Example:
- >>> x = torch.randn(2, 3, 5)
- >>> x.size()
- torch.Size([2, 3, 5])
- >>> torch.permute(x, (2, 0, 1)).size()
- torch.Size([5, 2, 3])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.poisson,
- r"""
- poisson(input, generator=None) -> Tensor
- Returns a tensor of the same size as :attr:`input` with each element
- sampled from a Poisson distribution with rate parameter given by the corresponding
- element in :attr:`input` i.e.,
- .. math::
- \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)
- :attr:`input` must be non-negative.
- Args:
- input (Tensor): the input tensor containing the rates of the Poisson distribution
- Keyword args:
- {generator}
- Example::
- >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
- >>> torch.poisson(rates)
- tensor([[9., 1., 3., 5.],
- [8., 6., 6., 0.],
- [0., 4., 5., 3.],
- [2., 1., 4., 2.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.polygamma,
- r"""
- polygamma(n, input, *, out=None) -> Tensor
- Alias for :func:`torch.special.polygamma`.
- """,
- )
- add_docstr(
- torch.positive,
- r"""
- positive(input) -> Tensor
- Returns :attr:`input`.
- Throws a runtime error if :attr:`input` is a bool tensor.
- """
- + r"""
- Args:
- {input}
- Example::
- >>> t = torch.randn(5)
- >>> t
- tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
- >>> torch.positive(t)
- tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.pow,
- r"""
- pow(input, exponent, *, out=None) -> Tensor
- Takes the power of each element in :attr:`input` with :attr:`exponent` and
- returns a tensor with the result.
- :attr:`exponent` can be either a single ``float`` number or a `Tensor`
- with the same number of elements as :attr:`input`.
- When :attr:`exponent` is a scalar value, the operation applied is:
- .. math::
- \text{out}_i = x_i ^ \text{exponent}
- When :attr:`exponent` is a tensor, the operation applied is:
- .. math::
- \text{out}_i = x_i ^ {\text{exponent}_i}
- """
- + r"""
- When :attr:`exponent` is a tensor, the shapes of :attr:`input`
- and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
- Args:
- {input}
- exponent (float or tensor): the exponent value
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
- >>> torch.pow(a, 2)
- tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
- >>> exp = torch.arange(1., 5.)
- >>> a = torch.arange(1., 5.)
- >>> a
- tensor([ 1., 2., 3., 4.])
- >>> exp
- tensor([ 1., 2., 3., 4.])
- >>> torch.pow(a, exp)
- tensor([ 1., 4., 27., 256.])
- .. function:: pow(self, exponent, *, out=None) -> Tensor
- :noindex:
- :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
- The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
- The operation applied is:
- .. math::
- \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}
- Args:
- self (float): the scalar base value for the power operation
- exponent (Tensor): the exponent tensor
- Keyword args:
- {out}
- Example::
- >>> exp = torch.arange(1., 5.)
- >>> base = 2
- >>> torch.pow(base, exp)
- tensor([ 2., 4., 8., 16.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.float_power,
- r"""
- float_power(input, exponent, *, out=None) -> Tensor
- Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
- If neither input is complex returns a ``torch.float64`` tensor,
- and if one or more inputs is complex returns a ``torch.complex128`` tensor.
- .. note::
- This function always computes in double precision, unlike :func:`torch.pow`,
- which implements more typical :ref:`type promotion <type-promotion-doc>`.
- This is useful when the computation needs to be performed in a wider or more precise dtype,
- or the results of the computation may contain fractional values not representable in the input dtypes,
- like when an integer base is raised to a negative integer exponent.
- Args:
- input (Tensor or Number): the base value(s)
- exponent (Tensor or Number): the exponent value(s)
- Keyword args:
- {out}
- Example::
- >>> a = torch.randint(10, (4,))
- >>> a
- tensor([6, 4, 7, 1])
- >>> torch.float_power(a, 2)
- tensor([36., 16., 49., 1.], dtype=torch.float64)
- >>> a = torch.arange(1, 5)
- >>> a
- tensor([ 1, 2, 3, 4])
- >>> exp = torch.tensor([2, -3, 4, -5])
- >>> exp
- tensor([ 2, -3, 4, -5])
- >>> torch.float_power(a, exp)
- tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.prod,
- r"""
- prod(input, *, dtype=None) -> Tensor
- Returns the product of all elements in the :attr:`input` tensor.
- Args:
- {input}
- Keyword args:
- {dtype}
- Example::
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[-0.8020, 0.5428, -1.5854]])
- >>> torch.prod(a)
- tensor(0.6902)
- .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
- Returns the product of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`.
- {keepdim_details}
- Args:
- {input}
- {dim}
- {keepdim}
- Keyword args:
- {dtype}
- Example::
- >>> a = torch.randn(4, 2)
- >>> a
- tensor([[ 0.5261, -0.3837],
- [ 1.1857, -0.2498],
- [-1.1646, 0.0705],
- [ 1.1131, -1.0629]])
- >>> torch.prod(a, 1)
- tensor([-0.2018, -0.2962, -0.0821, -1.1831])
- """.format(
- **single_dim_common
- ),
- )
- add_docstr(
- torch.promote_types,
- r"""
- promote_types(type1, type2) -> dtype
- Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
- not smaller nor of lower kind than either `type1` or `type2`. See type promotion
- :ref:`documentation <type-promotion-doc>` for more information on the type
- promotion logic.
- Args:
- type1 (:class:`torch.dtype`)
- type2 (:class:`torch.dtype`)
- Example::
- >>> torch.promote_types(torch.int32, torch.float32)
- torch.float32
- >>> torch.promote_types(torch.uint8, torch.long)
- torch.long
- """,
- )
- add_docstr(
- torch.qr,
- r"""
- qr(input, some=True, *, out=None) -> (Tensor, Tensor)
- Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
- and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
- with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
- :math:`R` being an upper triangular matrix or batch of upper triangular matrices.
- If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
- Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
- .. warning::
- :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
- and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
- replaced with a string parameter :attr:`mode`.
- ``Q, R = torch.qr(A)`` should be replaced with
- .. code:: python
- Q, R = torch.linalg.qr(A)
- ``Q, R = torch.qr(A, some=False)`` should be replaced with
- .. code:: python
- Q, R = torch.linalg.qr(A, mode="complete")
- .. warning::
- If you plan to backpropagate through QR, note that the current backward implementation
- is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
- columns of :attr:`input` are linearly independent.
- This behavior will probably change once QR supports pivoting.
- .. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
- and may produce different (valid) decompositions on different device types
- or different platforms.
- Args:
- input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
- batch dimensions consisting of matrices of dimension :math:`m \times n`.
- some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
- complete QR decomposition. If `k = min(m, n)` then:
- * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default)
- * ``some=False`` : returns `(Q, R)` with dimensions (m, m), (m, n)
- Keyword args:
- out (tuple, optional): tuple of `Q` and `R` tensors.
- The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
- Example::
- >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
- >>> q, r = torch.qr(a)
- >>> q
- tensor([[-0.8571, 0.3943, 0.3314],
- [-0.4286, -0.9029, -0.0343],
- [ 0.2857, -0.1714, 0.9429]])
- >>> r
- tensor([[ -14.0000, -21.0000, 14.0000],
- [ 0.0000, -175.0000, 70.0000],
- [ 0.0000, 0.0000, -35.0000]])
- >>> torch.mm(q, r).round()
- tensor([[ 12., -51., 4.],
- [ 6., 167., -68.],
- [ -4., 24., -41.]])
- >>> torch.mm(q.t(), q).round()
- tensor([[ 1., 0., 0.],
- [ 0., 1., -0.],
- [ 0., -0., 1.]])
- >>> a = torch.randn(3, 4, 5)
- >>> q, r = torch.qr(a, some=False)
- >>> torch.allclose(torch.matmul(q, r), a)
- True
- >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
- True
- """,
- )
- add_docstr(
- torch.rad2deg,
- r"""
- rad2deg(input, *, out=None) -> Tensor
- Returns a new tensor with each of the elements of :attr:`input`
- converted from angles in radians to degrees.
- Args:
- {input}
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
- >>> torch.rad2deg(a)
- tensor([[ 180.0233, -180.0233],
- [ 359.9894, -359.9894],
- [ 89.9544, -89.9544]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.deg2rad,
- r"""
- deg2rad(input, *, out=None) -> Tensor
- Returns a new tensor with each of the elements of :attr:`input`
- converted from angles in degrees to radians.
- Args:
- {input}
- Keyword arguments:
- {out}
- Example::
- >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
- >>> torch.deg2rad(a)
- tensor([[ 3.1416, -3.1416],
- [ 6.2832, -6.2832],
- [ 1.5708, -1.5708]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.heaviside,
- r"""
- heaviside(input, values, *, out=None) -> Tensor
- Computes the Heaviside step function for each element in :attr:`input`.
- The Heaviside step function is defined as:
- .. math::
- \text{heaviside}(input, values) = \begin{cases}
- 0, & \text{if input < 0}\\
- values, & \text{if input == 0}\\
- 1, & \text{if input > 0}
- \end{cases}
- """
- + r"""
- Args:
- {input}
- values (Tensor): The values to use where :attr:`input` is zero.
- Keyword arguments:
- {out}
- Example::
- >>> input = torch.tensor([-1.5, 0, 2.0])
- >>> values = torch.tensor([0.5])
- >>> torch.heaviside(input, values)
- tensor([0.0000, 0.5000, 1.0000])
- >>> values = torch.tensor([1.2, -2.0, 3.5])
- >>> torch.heaviside(input, values)
- tensor([0., -2., 1.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.rand,
- """
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \
- requires_grad=False, pin_memory=False) -> Tensor
- """
- + r"""
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`
- The shape of the tensor is defined by the variable argument :attr:`size`.
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
- Keyword args:
- {generator}
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {pin_memory}
- Example::
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.rand_like,
- r"""
- rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
- Returns a tensor with the same size as :attr:`input` that is filled with
- random numbers from a uniform distribution on the interval :math:`[0, 1)`.
- ``torch.rand_like(input)`` is equivalent to
- ``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
- Args:
- {input}
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
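- A minimal sketch (the sampled values are random, so the check below looks only
- at the inherited shape and dtype)::
- >>> a = torch.empty(2, 3, dtype=torch.float64)
- >>> b = torch.rand_like(a)  # uniform samples in [0, 1); values vary run to run
- >>> b.shape, b.dtype
- (torch.Size([2, 3]), torch.float64)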
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.randint,
- """
- randint(low=0, high, size, \\*, generator=None, out=None, \
- dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
- The shape of the tensor is defined by the variable argument :attr:`size`.
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
- Keyword args:
- {generator}
- {out}
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.randint_like,
- """
- randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
- memory_format=torch.preserve_format) -> Tensor
- Returns a tensor with the same shape as Tensor :attr:`input` filled with
- random integers generated uniformly between :attr:`low` (inclusive) and
- :attr:`high` (exclusive).
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
- Args:
- {input}
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
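- A minimal sketch (the drawn integers are random, so the check below looks only
- at the inherited shape and dtype)::
- >>> a = torch.zeros(2, 3, dtype=torch.int64)
- >>> b = torch.randint_like(a, 10)  # uniform integers in [0, 10); values vary run to run
- >>> b.shape, b.dtype
- (torch.Size([2, 3]), torch.int64)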
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.randn,
- """
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
- pin_memory=False) -> Tensor
- """
- + r"""
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
- .. math::
- \text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
- The shape of the tensor is defined by the variable argument :attr:`size`.
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
- Keyword args:
- {generator}
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {pin_memory}
- Example::
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.randn_like,
- r"""
- randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
- Returns a tensor with the same size as :attr:`input` that is filled with
- random numbers from a normal distribution with mean 0 and variance 1.
- ``torch.randn_like(input)`` is equivalent to
- ``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
- Args:
- {input}
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
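- A minimal sketch (the sampled values are random, so the check below looks only
- at the inherited shape and dtype; float32 assumes the default global dtype)::
- >>> a = torch.empty(2, 3)
- >>> b = torch.randn_like(a)  # standard-normal samples; values vary run to run
- >>> b.shape, b.dtype
- (torch.Size([2, 3]), torch.float32)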
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.randperm,
- """
- randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
- device=None, requires_grad=False, pin_memory=False) -> Tensor
- """
- + r"""
- Returns a random permutation of integers from ``0`` to ``n - 1``.
- Args:
- n (int): the upper bound (exclusive)
- Keyword args:
- {generator}
- {out}
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: ``torch.int64``.
- {layout}
- {device}
- {requires_grad}
- {pin_memory}
- Example::
- >>> torch.randperm(4)
- tensor([2, 1, 0, 3])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.tensor,
- r"""
- tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
- Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
- .. warning::
- When working with tensors, prefer using :func:`torch.Tensor.clone`,
- :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
- readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
- ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
- is equivalent to ``t.clone().detach().requires_grad_(True)``.
- .. seealso::
- :func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
- :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
- Args:
- {data}
- Keyword args:
- {dtype}
- device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
- then the device of data is used. If None and data is not a tensor then
- the result tensor is constructed on the CPU.
- {requires_grad}
- {pin_memory}
- Example::
- >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
- tensor([[ 0.1000, 1.2000],
- [ 2.2000, 3.1000],
- [ 4.9000, 5.2000]])
- >>> torch.tensor([0, 1]) # Type inference on data
- tensor([ 0, 1])
- >>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
- ... dtype=torch.float64,
- ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
- tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
- >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
- tensor(3.1416)
- >>> torch.tensor([]) # Create an empty tensor (of size (0,))
- tensor([])
- """.format(
- **factory_data_common_args
- ),
- )
- add_docstr(
- torch.range,
- r"""
- range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
- with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
- the gap between two values in the tensor.
- .. math::
- \text{out}_{i+1} = \text{out}_i + \text{step}.
- """
- + r"""
- .. warning::
- This function is deprecated and will be removed in a future release because its behavior is inconsistent with
- Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
- Args:
- start (float): the starting value for the set of points. Default: ``0``.
- end (float): the ending value for the set of points
- step (float): the gap between each pair of adjacent points. Default: ``1``.
- Keyword args:
- {out}
- {dtype} If `dtype` is not given, infer the data type from the other input
- arguments. If any of `start`, `end`, or `step` is floating-point, the
- `dtype` is inferred to be the default dtype, see
- :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
- be `torch.int64`.
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.range(1, 4)
- tensor([ 1., 2., 3., 4.])
- >>> torch.range(1, 4, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.arange,
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise adding a small epsilon to :attr:`end`
- in such cases.
- .. math::
- \text{out}_{i+1} = \text{out}_{i} + \text{step}
- """
- + r"""
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
- Keyword args:
- {out}
- {dtype} If `dtype` is not given, infer the data type from the other input
- arguments. If any of `start`, `end`, or `step` is floating-point, the
- `dtype` is inferred to be the default dtype, see
- :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
- be `torch.int64`.
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.ravel,
- r"""
- ravel(input) -> Tensor
- Return a contiguous flattened tensor. A copy is made only if needed.
- Args:
- {input}
- Example::
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.ravel(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.remainder,
- r"""
- remainder(input, other, *, out=None) -> Tensor
- Computes
- `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
- entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
- is less than that of :attr:`other`.
- It may also be defined in terms of :func:`torch.div` as
- .. code:: python
- torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
- .. note::
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
- See :func:`torch.fmod` for how division by zero is handled.
- .. seealso::
- :func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
- This one is defined in terms of division rounding towards zero.
- Args:
- input (Tensor or Scalar): the dividend
- other (Tensor or Scalar): the divisor
- Keyword args:
- {out}
- Example::
- >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([ 1., 0., 1., 1., 0., 1.])
- >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.renorm,
- r"""
- renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
- Returns a tensor where each sub-tensor of :attr:`input` along dimension
- :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
- than the value :attr:`maxnorm`.
- .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged.
- Args:
- {input}
- p (float): the power for the norm computation
- dim (int): the dimension to slice over to get the sub-tensors
- maxnorm (float): the maximum norm to keep each sub-tensor under
- Keyword args:
- {out}
- Example::
- >>> x = torch.ones(3, 3)
- >>> x[1].fill_(2)
- tensor([ 2., 2., 2.])
- >>> x[2].fill_(3)
- tensor([ 3., 3., 3.])
- >>> x
- tensor([[ 1., 1., 1.],
- [ 2., 2., 2.],
- [ 3., 3., 3.]])
- >>> torch.renorm(x, 1, 0, 5)
- tensor([[ 1.0000, 1.0000, 1.0000],
- [ 1.6667, 1.6667, 1.6667],
- [ 1.6667, 1.6667, 1.6667]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.reshape,
- r"""
- reshape(input, shape) -> Tensor
- Returns a tensor with the same data and number of elements as :attr:`input`,
- but with the specified shape. When possible, the returned tensor will be a view
- of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
- with compatible strides can be reshaped without copying, but you should not
- depend on the copying vs. viewing behavior.
- See :meth:`torch.Tensor.view` on when it is possible to return a view.
- A single dimension may be -1, in which case it's inferred from the remaining
- dimensions and the number of elements in :attr:`input`.
- Args:
- input (Tensor): the tensor to be reshaped
- shape (tuple of int): the new shape
- Example::
- >>> a = torch.arange(4.)
- >>> torch.reshape(a, (2, 2))
- tensor([[ 0., 1.],
- [ 2., 3.]])
- >>> b = torch.tensor([[0, 1], [2, 3]])
- >>> torch.reshape(b, (-1,))
- tensor([ 0, 1, 2, 3])
- """,
- )
- add_docstr(
- torch.result_type,
- r"""
- result_type(tensor1, tensor2) -> dtype
- Returns the :class:`torch.dtype` that would result from performing an arithmetic
- operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
- for more information on the type promotion logic.
- Args:
- tensor1 (Tensor or Number): an input tensor or number
- tensor2 (Tensor or Number): an input tensor or number
- Example::
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
- torch.float32
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
- torch.uint8
- """,
- )
- add_docstr(
- torch.row_stack,
- r"""
- row_stack(tensors, *, out=None) -> Tensor
- Alias of :func:`torch.vstack`.
- """,
- )
- add_docstr(
- torch.round,
- r"""
- round(input, *, decimals=0, out=None) -> Tensor
- Rounds elements of :attr:`input` to the nearest integer.
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
- .. note::
- This function implements the "round half to even" rule to
- break ties when a number is equidistant from two
- integers (e.g. `round(2.5)` is 2).
- When the :attr:`decimals` argument is specified the
- algorithm used is similar to NumPy's `around`. This
- algorithm is fast but inexact and it can easily
- overflow for low precision dtypes.
- E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
- .. seealso::
- :func:`torch.ceil`, which rounds up.
- :func:`torch.floor`, which rounds down.
- :func:`torch.trunc`, which rounds towards zero.
- Args:
- {input}
- decimals (int): Number of decimal places to round to (default: 0).
- If decimals is negative, it specifies the number of positions
- to the left of the decimal point.
- Keyword args:
- {out}
- Example::
- >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
- tensor([ 5., -2., 9., -8.])
- >>> # Values equidistant from two integers are rounded towards
- >>> # the nearest even value (zero is treated as even)
- >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
- tensor([-0., 0., 2., 2.])
- >>> # A positive decimals argument rounds to that decimal place
- >>> torch.round(torch.tensor([0.1234567]), decimals=3)
- tensor([0.1230])
- >>> # A negative decimals argument rounds to the left of the decimal
- >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
- tensor([1000.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.rsqrt,
- r"""
- rsqrt(input, *, out=None) -> Tensor
- Returns a new tensor with the reciprocal of the square-root of each of
- the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.0370, 0.2970, 1.5420, -0.9105])
- >>> torch.rsqrt(a)
- tensor([ nan, 1.8351, 0.8053, nan])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.scatter,
- r"""
- scatter(input, dim, index, src) -> Tensor
- Out-of-place version of :meth:`torch.Tensor.scatter_`
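- A small, deterministic sketch of the out-of-place semantics (the index and
- source values here are illustrative)::
- >>> src = torch.arange(1., 7.).reshape((2, 3))
- >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
- >>> # along dim=1: out[i][index[i][j]] = src[i][j]; other entries keep the input's zeros
- >>> torch.scatter(torch.zeros(3, 5), 1, index, src)
- tensor([[1., 2., 3., 0., 0.],
- [4., 5., 0., 0., 6.],
- [0., 0., 0., 0., 0.]])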
- """,
- )
- add_docstr(
- torch.scatter_add,
- r"""
- scatter_add(input, dim, index, src) -> Tensor
- Out-of-place version of :meth:`torch.Tensor.scatter_add_`
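- A small, deterministic sketch (the index and source values here are illustrative)::
- >>> src = torch.ones(2, 3)
- >>> index = torch.tensor([[0, 1, 2], [0, 1, 2]])
- >>> # along dim=0: out[index[i][j]][j] += src[i][j]
- >>> torch.scatter_add(torch.zeros(3, 3), 0, index, src)
- tensor([[2., 0., 0.],
- [0., 2., 0.],
- [0., 0., 2.]])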
- """,
- )
- add_docstr(
- torch.scatter_reduce,
- r"""
- scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
- Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
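- A small, deterministic sketch using the ``"sum"`` reduction (with the default
- ``include_self=True``, the zeros of the input participate in the reduction)::
- >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
- >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
- >>> torch.scatter_reduce(torch.zeros(3), 0, index, src, reduce="sum")
- tensor([ 4., 12., 5.])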
- """,
- )
- add_docstr(
- torch.select,
- r"""
- select(input, dim, index) -> Tensor
- Slices the :attr:`input` tensor along the selected dimension at the given index.
- This function returns a view of the original tensor with the given dimension removed.
- .. note:: If :attr:`input` is a sparse tensor and returning a view of
- the tensor is not possible, a RuntimeError exception is
- raised. If this is the case, consider using the
- :func:`torch.select_copy` function.
- Args:
- {input}
- dim (int): the dimension to slice
- index (int): the index to select with
- .. note::
- :meth:`select` is equivalent to slicing. For example,
- ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
- ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
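- A small, deterministic sketch of the slicing equivalence described above::
- >>> x = torch.arange(6).reshape(2, 3)
- >>> x
- tensor([[0, 1, 2],
- [3, 4, 5]])
- >>> torch.select(x, 0, 1)  # same as x[1]
- tensor([3, 4, 5])
- >>> torch.select(x, 1, 2)  # same as x[:, 2]
- tensor([2, 5])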
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.select_scatter,
- r"""
- select_scatter(input, src, dim, index) -> Tensor
- Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
- This function returns a tensor with fresh storage; it does not create a view.
- Args:
- {input}
- src (Tensor): The tensor to embed into :attr:`input`
- dim (int): the dimension to insert the slice into.
- index (int): the index to select with
- .. note::
- :attr:`src` must be of the proper size in order to be embedded
- into :attr:`input`. Specifically, it should have the same shape as
- ``torch.select(input, dim, index)``
- Example::
- >>> a = torch.zeros(2, 2)
- >>> b = torch.ones(2)
- >>> a.select_scatter(b, 0, 0)
- tensor([[1., 1.],
- [0., 0.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.slice_scatter,
- r"""
- slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
- Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
- dimension.
- This function returns a tensor with fresh storage; it does not create a view.
- Args:
- {input}
- src (Tensor): The tensor to embed into :attr:`input`
- dim (int): the dimension to insert the slice into
- start (Optional[int]): the start index of where to insert the slice
- end (Optional[int]): the end index of where to insert the slice
- step (int): how many elements to skip in between
- Example::
- >>> a = torch.zeros(8, 8)
- >>> b = torch.ones(8)
- >>> a.slice_scatter(b, start=6)
- tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [1., 1., 1., 1., 1., 1., 1., 1.],
- [1., 1., 1., 1., 1., 1., 1., 1.]])
- >>> b = torch.ones(2)
- >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
- tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.set_flush_denormal,
- r"""
- set_flush_denormal(mode) -> bool
- Disables denormal floating numbers on CPU.
- Returns ``True`` if your system supports flushing denormal numbers and it
- successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
- is only supported on x86 architectures supporting SSE3.
- Args:
- mode (bool): Controls whether to enable flush denormal mode or not
- Example::
- >>> torch.set_flush_denormal(True)
- True
- >>> torch.tensor([1e-323], dtype=torch.float64)
- tensor([ 0.], dtype=torch.float64)
- >>> torch.set_flush_denormal(False)
- True
- >>> torch.tensor([1e-323], dtype=torch.float64)
- tensor(9.88131e-324 *
- [ 1.0000], dtype=torch.float64)
- """,
- )
- add_docstr(
- torch.set_num_threads,
- r"""
- set_num_threads(int)
- Sets the number of threads used for intraop parallelism on CPU.
- .. warning::
- To ensure that the correct number of threads is used, set_num_threads
- must be called before running eager, JIT or autograd code.
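- A minimal sketch (the thread count actually applied can depend on the build)::
- >>> torch.set_num_threads(4)  # call early, before any eager/JIT/autograd work
- >>> torch.get_num_threads()
- 4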
- """,
- )
- add_docstr(
- torch.set_num_interop_threads,
- r"""
- set_num_interop_threads(int)
- Sets the number of threads used for interop parallelism
- (e.g. in JIT interpreter) on CPU.
- .. warning::
- Can only be called once and before any inter-op parallel work
- is started (e.g. JIT execution).
- """,
- )
- add_docstr(
- torch.sigmoid,
- r"""
- sigmoid(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.expit`.
- """,
- )
- add_docstr(
- torch.logit,
- r"""
- logit(input, eps=None, *, out=None) -> Tensor
- Alias for :func:`torch.special.logit`.
- """,
- )
- add_docstr(
- torch.sign,
- r"""
- sign(input, *, out=None) -> Tensor
- Returns a new tensor with the signs of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
- >>> a
- tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
- >>> torch.sign(a)
- tensor([ 1., -1., 0., 1.])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.signbit,
- r"""
- signbit(input, *, out=None) -> Tensor
- Tests if each element of :attr:`input` has its sign bit set or not.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
- >>> torch.signbit(a)
- tensor([ False, True, False, False])
- >>> a = torch.tensor([-0.0, 0.0])
- >>> torch.signbit(a)
- tensor([ True, False])
- .. note::
- signbit handles signed zeros, so negative zero (-0) returns True.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.sgn,
- r"""
- sgn(input, *, out=None) -> Tensor
- This function is an extension of :func:`torch.sign` to complex tensors.
- For complex :attr:`input`, it computes a new tensor whose elements have
- the same angles as the corresponding elements of :attr:`input` and
- absolute value (i.e. magnitude) one. For non-complex tensors it is
- equivalent to :func:`torch.sign`.
- .. math::
- \text{out}_{i} = \begin{cases}
- 0 & |\text{input}_i| == 0 \\
- \frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
- \end{cases}
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
- >>> t.sgn()
- tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.sin,
- r"""
- sin(input, *, out=None) -> Tensor
- Returns a new tensor with the sine of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \sin(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.5461, 0.1347, -2.7266, -0.2746])
- >>> torch.sin(a)
- tensor([-0.5194, 0.1343, -0.4032, -0.2711])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.sinc,
- r"""
- sinc(input, *, out=None) -> Tensor
- Alias for :func:`torch.special.sinc`.
- """,
- )
- add_docstr(
- torch.sinh,
- r"""
- sinh(input, *, out=None) -> Tensor
- Returns a new tensor with the hyperbolic sine of the elements of
- :attr:`input`.
- .. math::
- \text{out}_{i} = \sinh(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
- >>> torch.sinh(a)
- tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
- .. note::
- When :attr:`input` is on the CPU, the implementation of torch.sinh may use
- the Sleef library, which rounds very large results to infinity or negative
- infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.sort,
- r"""
- sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
- Sorts the elements of the :attr:`input` tensor along a given dimension
- in ascending order by value.
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
- If :attr:`descending` is ``True`` then the elements are sorted in descending
- order by value.
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements.
- A namedtuple of (values, indices) is returned, where the `values` are the
- sorted values and `indices` are the indices of the elements in the original
- `input` tensor.
- Args:
- {input}
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): makes the sorting routine stable, which guarantees that the order
- of equivalent elements is preserved.
- Keyword args:
- out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
- be optionally given to be used as output buffers
- Example::
- >>> x = torch.randn(3, 4)
- >>> sorted, indices = torch.sort(x)
- >>> sorted
- tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
- [-0.5793, 0.0061, 0.6058, 0.9497],
- [-0.5071, 0.3343, 0.9553, 1.0960]])
- >>> indices
- tensor([[ 1, 0, 2, 3],
- [ 3, 1, 0, 2],
- [ 0, 3, 1, 2]])
- >>> sorted, indices = torch.sort(x, 0)
- >>> sorted
- tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
- [ 0.0608, 0.0061, 0.9497, 0.3343],
- [ 0.6058, 0.9553, 1.0960, 2.3332]])
- >>> indices
- tensor([[ 2, 0, 0, 1],
- [ 0, 1, 1, 2],
- [ 1, 2, 2, 0]])
- >>> x = torch.tensor([0, 1] * 9)
- >>> x.sort()
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
- >>> x.sort(stable=True)
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.argsort,
- r"""
- argsort(input, dim=-1, descending=False, stable=False) -> Tensor
- Returns the indices that sort a tensor along a given dimension in ascending
- order by value.
- This is the second value returned by :meth:`torch.sort`. See its documentation
- for the exact semantics of this method.
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements. If ``False``, the relative order of values
- which compare equal is not guaranteed. ``True`` is slower.
- Args:
- {input}
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): controls the relative order of equivalent elements
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
- [ 0.1598, 0.0788, -0.0745, -1.2700],
- [ 1.2208, 1.0722, -0.7064, 1.2564],
- [ 0.0669, -0.2318, -0.8229, -0.9280]])
- >>> torch.argsort(a, dim=1)
- tensor([[2, 0, 3, 1],
- [3, 2, 1, 0],
- [2, 1, 0, 3],
- [3, 2, 1, 0]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.msort,
- r"""
- msort(input, *, out=None) -> Tensor
- Sorts the elements of the :attr:`input` tensor along its first dimension
- in ascending order by value.
- .. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
- See also :func:`torch.sort`.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> t = torch.randn(3, 4)
- >>> t
- tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
- [-2.0527, -1.1250, 0.2275, 0.3077],
- [-0.0881, -0.1259, -0.5495, 1.0284]])
- >>> torch.msort(t)
- tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
- [-0.1321, -0.1259, -0.5495, 0.3077],
- [-0.0881, 0.4370, 0.2275, 1.0284]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.sparse_compressed_tensor,
- r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """
- r"""*, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
- Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
- CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
- the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
- matrix multiplication operations in Compressed Sparse format are
- typically faster than those for sparse tensors in COO format. Make sure
- you have a look at :ref:`the note on the data type of the indices
- <sparse-compressed-docs>`.
- {sparse_factory_device_note}
- Args:
- compressed_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, compressed_dim_size + 1)``. The last element of
- each batch is the number of non-zero elements or blocks. This
- tensor encodes the index in ``values`` and ``plain_indices``
- depending on where the given compressed dimension (row or
- column) starts. Each successive number in the tensor
- subtracted by the number before it denotes the number of
- elements or blocks in a given compressed dimension.
- plain_indices (array_like): Plain dimension (column or row)
- co-ordinates of each element or block in values. (B+1)-dimensional
- tensor with the same length as values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, and other types that
- represent a (1+K)-dimensional (for CSR and CSC layouts) or
- (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
- ``K`` is the number of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
- blocksize[1], *densesize)`` where ``blocksize[0] ==
- blocksize[1] == 1`` for CSR and CSC formats. If not provided,
- the size will be inferred as the minimum size big enough to
- hold all non-zero elements or blocks.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- layout (:class:`torch.layout`, required): the desired layout of
- returned tensor: :attr:`torch.sparse_csr`,
- :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
- :attr:`torch.sparse_bsc`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_tensor_type`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- {requires_grad}
- {check_invariants}
- Example::
- >>> compressed_indices = [0, 2, 4]
- >>> plain_indices = [0, 1, 0, 1]
- >>> values = [1, 2, 3, 4]
- >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
- ... torch.tensor(plain_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
- tensor(crow_indices=tensor([0, 2, 4]),
- col_indices=tensor([0, 1, 0, 1]),
- values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
- dtype=torch.float64, layout=torch.sparse_csr)
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.sparse_csr_tensor,
- r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """
- r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
- Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
- values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
- in CSR format are typically faster than those for sparse tensors in COO format. Make sure you have a look
- at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
- {sparse_factory_device_note}
- Args:
- crow_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, nrows + 1)``. The last element of each batch
- is the number of non-zeros. This tensor encodes the index in
- values and col_indices depending on where the given row
- starts. Each successive number in the tensor subtracted by the
- number before it denotes the number of elements in a given
- row.
- col_indices (array_like): Column co-ordinates of each element in
- values. (B+1)-dimensional tensor with the same length
- as values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, and other types that
- represent a (1+K)-dimensional tensor where ``K`` is the number
- of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
- not provided, the size will be inferred as the minimum size
- big enough to hold all non-zero elements.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_tensor_type`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- {requires_grad}
- {check_invariants}
- Example::
- >>> crow_indices = [0, 2, 4]
- >>> col_indices = [0, 1, 0, 1]
- >>> values = [1, 2, 3, 4]
- >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
- ... torch.tensor(col_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(crow_indices=tensor([0, 2, 4]),
- col_indices=tensor([0, 1, 0, 1]),
- values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
- dtype=torch.float64, layout=torch.sparse_csr)
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.sparse_csc_tensor,
- r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """
- r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
- Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
- <sparse-csc-docs>` with specified values at the given
- :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
- multiplication operations in CSC format are typically faster than those
- for sparse tensors in COO format. Make sure you have a look at :ref:`the
- note on the data type of the indices <sparse-csc-docs>`.
- {sparse_factory_device_note}
- Args:
- ccol_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, ncols + 1)``. The last element of each batch
- is the number of non-zeros. This tensor encodes the index in
- values and row_indices depending on where the given column
- starts. Each successive number in the tensor subtracted by the
- number before it denotes the number of elements in a given
- column.
- row_indices (array_like): Row co-ordinates of each element in
- values. (B+1)-dimensional tensor with the same length as
- values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, and other types that
- represent a (1+K)-dimensional tensor where ``K`` is the number
- of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
- not provided, the size will be inferred as the minimum size
- big enough to hold all non-zero elements.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_tensor_type`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- {requires_grad}
- {check_invariants}
- Example::
- >>> ccol_indices = [0, 2, 4]
- >>> row_indices = [0, 1, 0, 1]
- >>> values = [1, 2, 3, 4]
- >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
- ... torch.tensor(row_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(ccol_indices=tensor([0, 2, 4]),
- row_indices=tensor([0, 1, 0, 1]),
- values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
- dtype=torch.float64, layout=torch.sparse_csc)
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.sparse_bsr_tensor,
- r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """
- r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
- Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
- <sparse-bsr-docs>` with specified 2-dimensional blocks at the given
- :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
- multiplication operations in BSR format are typically faster than those
- for sparse tensors in COO format. Make sure you have a look at :ref:`the
- note on the data type of the indices <sparse-bsr-docs>`.
- {sparse_factory_device_note}
- Args:
- crow_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, nrowblocks + 1)``. The last element of each
- batch is the number of non-zeros. This tensor encodes the
- block index in values and col_indices depending on where the
- given row block starts. Each successive number in the tensor
- subtracted by the number before it denotes the number of
- blocks in a given row.
- col_indices (array_like): Column block co-ordinates of each block
- in values. (B+1)-dimensional tensor with the same length as
- values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, and other types that
- represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
- number of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
- blocksize[1], *densesize)`` where ``blocksize ==
- values.shape[1:3]``. If not provided, the size will be
- inferred as the minimum size big enough to hold all non-zero
- blocks.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_tensor_type`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- {requires_grad}
- {check_invariants}
- Example::
- >>> crow_indices = [0, 1, 2]
- >>> col_indices = [0, 1]
- >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
- >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
- ... torch.tensor(col_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(crow_indices=tensor([0, 1, 2]),
- col_indices=tensor([0, 1]),
- values=tensor([[[1., 2.],
- [3., 4.]],
- [[5., 6.],
- [7., 8.]]]), size=(4, 4), nnz=2, dtype=torch.float64,
- layout=torch.sparse_bsr)
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.sparse_bsc_tensor,
- r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """
- r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
- Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
- Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
- given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
- multiplication operations in BSC format are typically faster than those
- for sparse tensors in COO format. Make sure you have a look at :ref:`the
- note on the data type of the indices <sparse-bsc-docs>`.
- {sparse_factory_device_note}
- Args:
- ccol_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, ncolblocks + 1)``. The last element of each
- batch is the number of non-zero blocks. This tensor encodes the
- block index in values and row_indices depending on where the
- given column block starts. The difference between consecutive
- numbers in the tensor denotes the number of blocks in a given
- column.
- row_indices (array_like): Row block co-ordinates of each block in
- values. (B+1)-dimensional tensor with the same length
- as values.
- values (array_like): Initial blocks for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, and other types that
- represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
- number of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
- blocksize[1], *densesize)``. If not provided, the size will be
- inferred as the minimum size big enough to hold all non-zero
- blocks.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_tensor_type`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- {requires_grad}
- {check_invariants}
- Example::
- >>> ccol_indices = [0, 1, 2]
- >>> row_indices = [0, 1]
- >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
- >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
- ... torch.tensor(row_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(ccol_indices=tensor([0, 1, 2]),
- row_indices=tensor([0, 1]),
- values=tensor([[[1., 2.],
- [3., 4.]],
- [[5., 6.],
- [7., 8.]]]), size=(4, 4), nnz=2, dtype=torch.float64,
- layout=torch.sparse_bsc)
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.sparse_coo_tensor,
- r"""
- sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
- Constructs a :ref:`sparse tensor in COO(rdinate) format
- <sparse-coo-docs>` with specified values at the given
- :attr:`indices`.
- .. note::
- This function returns an :ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
- {sparse_factory_device_note}
- Args:
- indices (array_like): Initial data for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
- internally. The indices are the coordinates of the non-zero values in the matrix, and thus
- should be two-dimensional where the first dimension is the number of tensor dimensions and
- the second dimension is the number of non-zero values.
- values (array_like): Initial values for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types.
- size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
- provided the size will be inferred as the minimum size big enough to hold all non-zero
- elements.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if None, infers data type from :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, uses the current device for the default tensor type
- (see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- {requires_grad}
- {check_invariants}
- Example::
- >>> i = torch.tensor([[0, 1, 1],
- ... [2, 0, 2]])
- >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
- >>> torch.sparse_coo_tensor(i, v, [2, 4])
- tensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- size=(2, 4), nnz=3, layout=torch.sparse_coo)
- >>> torch.sparse_coo_tensor(i, v) # Shape inference
- tensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- size=(2, 3), nnz=3, layout=torch.sparse_coo)
- >>> torch.sparse_coo_tensor(i, v, [2, 4],
- ... dtype=torch.float64,
- ... device=torch.device('cuda:0'))
- tensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
- layout=torch.sparse_coo)
- # Create an empty sparse tensor with the following invariants:
- # 1. sparse_dim + dense_dim = len(SparseTensor.shape)
- # 2. SparseTensor._indices().shape = (sparse_dim, nnz)
- # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
- #
- # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
- # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
- >>> torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
- tensor(indices=tensor([], size=(1, 0)),
- values=tensor([], size=(0,)),
- size=(1,), nnz=0, layout=torch.sparse_coo)
- # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
- # sparse_dim = 1
- >>> torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
- tensor(indices=tensor([], size=(1, 0)),
- values=tensor([], size=(0, 2)),
- size=(1, 2), nnz=0, layout=torch.sparse_coo)
- .. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.sqrt,
- r"""
- sqrt(input, *, out=None) -> Tensor
- Returns a new tensor with the square-root of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \sqrt{\text{input}_{i}}
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-2.0755, 1.0226, 0.0831, 0.4806])
- >>> torch.sqrt(a)
- tensor([ nan, 1.0112, 0.2883, 0.6933])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.square,
- r"""
- square(input, *, out=None) -> Tensor
- Returns a new tensor with the square of the elements of :attr:`input`.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-2.0755, 1.0226, 0.0831, 0.4806])
- >>> torch.square(a)
- tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.squeeze,
- r"""
- squeeze(input, dim=None) -> Tensor
- Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
- For example, if `input` is of shape:
- :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
- will be of shape: :math:`(A \times B \times C \times D)`.
- When :attr:`dim` is given, a squeeze operation is done only in the given
- dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
- ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
- will squeeze the tensor to the shape :math:`(A \times B)`.
- .. note:: The returned tensor shares the storage with the input tensor,
- so changing the contents of one will change the contents of the other.
- .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
- will also remove the batch dimension, which can lead to unexpected
- errors. Consider specifying only the dims you wish to be squeezed.
- Args:
- {input}
- dim (int or tuple of ints, optional): if given, the input will be squeezed
- only in the specified dimensions.
- .. versionchanged:: 2.0
- :attr:`dim` now accepts tuples of dimensions.
- Example::
- >>> x = torch.zeros(2, 1, 2, 1, 2)
- >>> x.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x)
- >>> y.size()
- torch.Size([2, 2, 2])
- >>> y = torch.squeeze(x, 0)
- >>> y.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x, 1)
- >>> y.size()
- torch.Size([2, 2, 1, 2])
- >>> y = torch.squeeze(x, (1, 2, 3))
- >>> y.size()
- torch.Size([2, 2, 2])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.std,
- r"""
- std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
- Calculates the standard deviation over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
- The standard deviation (:math:`\sigma`) is calculated as
- .. math:: \sigma = \sqrt{\frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
- """
- + r"""
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- {keepdim}
- {out}
- Example::
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std(a, dim=1, keepdim=True)
- tensor([[1.0311],
- [0.7477],
- [1.2204],
- [0.9087]])
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.std_mean,
- r"""
- std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
- Calculates the standard deviation and mean over the dimensions specified by
- :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
- ``None`` to reduce over all dimensions.
- The standard deviation (:math:`\sigma`) is calculated as
- .. math:: \sigma = \sqrt{\frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
- """
- + r"""
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- {keepdim}
- {out}
- Returns:
- A tuple (std, mean) containing the standard deviation and mean.
- Example::
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std_mean(a, dim=0, keepdim=True)
- (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.sub,
- r"""
- sub(input, other, *, alpha=1, out=None) -> Tensor
- Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
- .. math::
- \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
- """
- + r"""
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
- Args:
- {input}
- other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- {out}
- Example::
- >>> a = torch.tensor((1, 2))
- >>> b = torch.tensor((0, 1))
- >>> torch.sub(a, b, alpha=2)
- tensor([1, 0])
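- >>> # a Python number broadcasts against the tensor (shown for illustration)
- >>> torch.sub(a, 1)
- tensor([0, 1])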
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.subtract,
- r"""
- subtract(input, other, *, alpha=1, out=None) -> Tensor
- Alias for :func:`torch.sub`.
- """,
- )
- add_docstr(
- torch.sum,
- r"""
- sum(input, *, dtype=None) -> Tensor
- Returns the sum of all elements in the :attr:`input` tensor.
- Args:
- {input}
- Keyword args:
- {dtype}
- Example::
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.1133, -0.9567, 0.2958]])
- >>> torch.sum(a)
- tensor(-0.5475)
- .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
- Returns the sum of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- {keepdim}
- Keyword args:
- {dtype}
- Example::
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
- [-0.2993, 0.9138, 0.9337, -1.6864],
- [ 0.1132, 0.7892, -0.1003, 0.5688],
- [ 0.3637, -0.9906, -0.4752, -1.5197]])
- >>> torch.sum(a, 1)
- tensor([-0.4598, -0.1381, 1.3708, -2.6217])
- >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
- >>> torch.sum(b, (2, 1))
- tensor([ 435, 1335, 2235, 3135])
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.nansum,
- r"""
- nansum(input, *, dtype=None) -> Tensor
- Returns the sum of all elements, treating Not a Number (NaN) values as zero.
- Args:
- {input}
- Keyword args:
- {dtype}
- Example::
- >>> a = torch.tensor([1., 2., float('nan'), 4.])
- >>> torch.nansum(a)
- tensor(7.)
- .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
- Returns the sum of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`, treating Not a Number (NaN) values as zero.
- If :attr:`dim` is a list of dimensions, reduce over all of them.
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- {keepdim}
- Keyword args:
- {dtype}
- Example::
- >>> torch.nansum(torch.tensor([1., float("nan")]))
- tensor(1.)
- >>> a = torch.tensor([[1, 2], [3., float("nan")]])
- >>> torch.nansum(a)
- tensor(6.)
- >>> torch.nansum(a, dim=0)
- tensor([4., 2.])
- >>> torch.nansum(a, dim=1)
- tensor([3., 3.])
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.svd,
- r"""
- svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
- Computes the singular value decomposition of either a matrix or batch of
- matrices :attr:`input`. The singular value decomposition is represented as a
- namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
- where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
- and the conjugate transpose of `V` for complex inputs.
- If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
- batched with the same batch dimensions as :attr:`input`.
- If :attr:`some` is `True` (default), the method returns the reduced singular
- value decomposition. In this case, if the last two dimensions of :attr:`input` are
- `m` and `n`, then the returned `U` and `V` matrices will contain only
- `min(n, m)` orthonormal columns.
- If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
- zero-filled matrices of shape `(m, m)` and `(n, n)`
- respectively, and the same device as :attr:`input`. The argument :attr:`some`
- has no effect when :attr:`compute_uv` is `False`.
- Supports :attr:`input` of float, double, cfloat and cdouble data types.
- The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
- always be real-valued, even if :attr:`input` is complex.
- .. warning::
- :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
- and will be removed in a future PyTorch release.
- ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
- .. code:: python
- U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
- V = Vh.mH
- ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
- .. code:: python
- S = torch.linalg.svdvals(A)
- .. note:: Differences with :func:`torch.linalg.svd`:
- * :attr:`some` is the opposite of
- :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
- default value for both is `True`, so the default behavior is
- effectively the opposite.
- * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
- `Vh`, that is, :math:`V^{\text{H}}`.
- * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
- tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
- empty tensors.
- .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
- then the singular values of each matrix in the batch are returned in descending order.
- .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
- .. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
- and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
- can be arbitrary bases of the corresponding subspaces.
- .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
- (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
- on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
- and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
- .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
- be represented as a column-major matrix (i.e. Fortran-contiguous).
- .. warning:: The gradients with respect to `U` and `V` will only be finite when the input
- has neither zero nor repeated singular values.
- .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
- `U` and `V` will be numerically unstable, as they depend on
- :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
- has small singular values, as these gradients also depend on `S⁻¹`.
- .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
- as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
- The same happens when :attr:`input` has repeated singular values, where one may multiply
- the columns of the spanning subspace in `U` and `V` by a rotation matrix
- and `the resulting vectors will span the same subspace`_.
- Different platforms, like NumPy, or inputs on different device types,
- may produce different `U` and `V` tensors.
- Args:
- input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
- batch dimensions consisting of `(m, n)` matrices.
- some (bool, optional): controls whether to compute the reduced or full decomposition, and
- consequently, the shape of returned `U` and `V`. Default: `True`.
- compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
- Keyword args:
- out (tuple, optional): the output tuple of tensors
- Example::
- >>> a = torch.randn(5, 3)
- >>> a
- tensor([[ 0.2364, -0.7752, 0.6372],
- [ 1.7201, 0.7394, -0.0504],
- [-0.3371, -1.0584, 0.5296],
- [ 0.3550, -0.4022, 1.5569],
- [ 0.2445, -0.0158, 1.1414]])
- >>> u, s, v = torch.svd(a)
- >>> u
- tensor([[ 0.4027, 0.0287, 0.5434],
- [-0.1946, 0.8833, 0.3679],
- [ 0.4296, -0.2890, 0.5261],
- [ 0.6604, 0.2717, -0.2618],
- [ 0.4234, 0.2481, -0.4733]])
- >>> s
- tensor([2.3289, 2.0315, 0.7806])
- >>> v
- tensor([[-0.0199, 0.8766, 0.4809],
- [-0.5080, 0.4054, -0.7600],
- [ 0.8611, 0.2594, -0.4373]])
- >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
- tensor(8.6531e-07)
- >>> a_big = torch.randn(7, 5, 3)
- >>> u, s, v = torch.svd(a_big)
- >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
- tensor(2.6503e-06)
- .. _the resulting vectors will span the same subspace:
- https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
- """,
- )
- add_docstr(
- torch.t,
- r"""
- t(input) -> Tensor
- Expects :attr:`input` to be a tensor with at most 2 dimensions, and transposes dimensions 0
- and 1.
- 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
- is equivalent to ``transpose(input, 0, 1)``.
- Args:
- {input}
- Example::
- >>> x = torch.randn(())
- >>> x
- tensor(0.1995)
- >>> torch.t(x)
- tensor(0.1995)
- >>> x = torch.randn(3)
- >>> x
- tensor([ 2.4320, -0.4608, 0.7702])
- >>> torch.t(x)
- tensor([ 2.4320, -0.4608, 0.7702])
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 0.4875, 0.9158, -0.5872],
- [ 0.3938, -0.6929, 0.6932]])
- >>> torch.t(x)
- tensor([[ 0.4875, 0.3938],
- [ 0.9158, -0.6929],
- [-0.5872, 0.6932]])
- See also :func:`torch.transpose`.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.flip,
- r"""
- flip(input, dims) -> Tensor
- Reverse the order of an n-D tensor along the given axes in :attr:`dims`.
- .. note::
- `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
- which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
- `torch.flip` is expected to be slower than `np.flip`.
- Args:
- {input}
- dims (a list or tuple): axes to flip along
- Example::
- >>> x = torch.arange(8).view(2, 2, 2)
- >>> x
- tensor([[[ 0, 1],
- [ 2, 3]],
- [[ 4, 5],
- [ 6, 7]]])
- >>> torch.flip(x, [0, 1])
- tensor([[[ 6, 7],
- [ 4, 5]],
- [[ 2, 3],
- [ 0, 1]]])
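- >>> # a single axis can be flipped as well (shown for illustration)
- >>> torch.flip(x, [0])
- tensor([[[ 4, 5],
- [ 6, 7]],
- [[ 0, 1],
- [ 2, 3]]])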
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.fliplr,
- r"""
- fliplr(input) -> Tensor
- Flip tensor in the left/right direction, returning a new tensor.
- Flip the entries in each row in the left/right direction.
- Columns are preserved, but appear in a different order than before.
- Note:
- Requires the tensor to be at least 2-D.
- .. note::
- `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
- which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
- `torch.fliplr` is expected to be slower than `np.fliplr`.
- Args:
- input (Tensor): Must be at least 2-dimensional.
- Example::
- >>> x = torch.arange(4).view(2, 2)
- >>> x
- tensor([[0, 1],
- [2, 3]])
- >>> torch.fliplr(x)
- tensor([[1, 0],
- [3, 2]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.flipud,
- r"""
- flipud(input) -> Tensor
- Flip tensor in the up/down direction, returning a new tensor.
- Flip the entries in each column in the up/down direction.
- Rows are preserved, but appear in a different order than before.
- Note:
- Requires the tensor to be at least 1-D.
- .. note::
- `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
- which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
- `torch.flipud` is expected to be slower than `np.flipud`.
- Args:
- input (Tensor): Must be at least 1-dimensional.
- Example::
- >>> x = torch.arange(4).view(2, 2)
- >>> x
- tensor([[0, 1],
- [2, 3]])
- >>> torch.flipud(x)
- tensor([[2, 3],
- [0, 1]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.roll,
- r"""
- roll(input, shifts, dims=None) -> Tensor
- Roll the tensor :attr:`input` along the given dimension(s). Elements that are
- shifted beyond the last position are re-introduced at the first position. If
- :attr:`dims` is `None`, the tensor will be flattened before rolling and then
- restored to the original shape.
- Args:
- {input}
- shifts (int or tuple of ints): The number of places by which the elements
- of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
- the same size, and each dimension will be rolled by the corresponding
- value.
- dims (int or tuple of ints): Axes along which to roll
- Example::
- >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
- >>> x
- tensor([[1, 2],
- [3, 4],
- [5, 6],
- [7, 8]])
- >>> torch.roll(x, 1)
- tensor([[8, 1],
- [2, 3],
- [4, 5],
- [6, 7]])
- >>> torch.roll(x, 1, 0)
- tensor([[7, 8],
- [1, 2],
- [3, 4],
- [5, 6]])
- >>> torch.roll(x, -1, 0)
- tensor([[3, 4],
- [5, 6],
- [7, 8],
- [1, 2]])
- >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
- tensor([[6, 5],
- [8, 7],
- [2, 1],
- [4, 3]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.rot90,
- r"""
- rot90(input, k=1, dims=[0,1]) -> Tensor
- Rotate an n-D tensor by 90 degrees in the plane specified by :attr:`dims`.
- Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
- Args:
- {input}
- k (int): number of times to rotate. Default value is 1
- dims (a list or tuple): axes to rotate. Default value is [0, 1]
- Example::
- >>> x = torch.arange(4).view(2, 2)
- >>> x
- tensor([[0, 1],
- [2, 3]])
- >>> torch.rot90(x, 1, [0, 1])
- tensor([[1, 3],
- [0, 2]])
- >>> x = torch.arange(8).view(2, 2, 2)
- >>> x
- tensor([[[0, 1],
- [2, 3]],
- [[4, 5],
- [6, 7]]])
- >>> torch.rot90(x, 1, [1, 2])
- tensor([[[1, 3],
- [0, 2]],
- [[5, 7],
- [4, 6]]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.take,
- r"""
- take(input, index) -> Tensor
- Returns a new tensor with the elements of :attr:`input` at the given indices.
- The input tensor is treated as if it were viewed as a 1-D tensor. The result
- takes the same shape as the indices.
- Args:
- {input}
- index (LongTensor): the indices into the tensor
- Example::
- >>> src = torch.tensor([[4, 3, 5],
- ... [6, 7, 8]])
- >>> torch.take(src, torch.tensor([0, 2, 5]))
- tensor([ 4, 5, 8])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.take_along_dim,
- r"""
- take_along_dim(input, indices, dim=None, *, out=None) -> Tensor
- Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
- Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
- are designed to work with this function. See the examples below.
- .. note::
- This function is similar to NumPy's `take_along_axis`.
- See also :func:`torch.gather`.
- Args:
- {input}
- indices (tensor): the indices into :attr:`input`. Must have long dtype.
- dim (int, optional): dimension to select along. If ``None``, :attr:`input`
- is treated as if it were flattened to 1-D before selecting.
- Keyword args:
- {out}
- Example::
- >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
- >>> max_idx = torch.argmax(t)
- >>> torch.take_along_dim(t, max_idx)
- tensor([60])
- >>> sorted_idx = torch.argsort(t, dim=1)
- >>> torch.take_along_dim(t, sorted_idx, dim=1)
- tensor([[10, 20, 30],
- [40, 50, 60]])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.tan,
- r"""
- tan(input, *, out=None) -> Tensor
- Returns a new tensor with the tangent of the elements of :attr:`input`.
- .. math::
- \text{out}_{i} = \tan(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([-1.2027, -1.7687, 0.4412, -1.3856])
- >>> torch.tan(a)
- tensor([-2.5930, 4.9859, 0.4722, -5.3366])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.tanh,
- r"""
- tanh(input, *, out=None) -> Tensor
- Returns a new tensor with the hyperbolic tangent of the elements
- of :attr:`input`.
- .. math::
- \text{out}_{i} = \tanh(\text{input}_{i})
- """
- + r"""
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
- >>> torch.tanh(a)
- tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
- """.format(
- **common_args
- ),
- )
- add_docstr(
- # torch.softmax doc str. Point this to torch.nn.functional.softmax
- torch.softmax,
- r"""
- softmax(input, dim, *, dtype=None) -> Tensor
- Alias for :func:`torch.nn.functional.softmax`.
- """,
- )
- add_docstr(
- torch.topk,
- r"""
- topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
- Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
- a given dimension.
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
- If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
- A namedtuple of `(values, indices)` is returned with the `values` and
- `indices` of the largest `k` elements of each row of the `input` tensor in the
- given dimension `dim`.
- If the boolean option :attr:`sorted` is ``True``, the returned
- `k` elements are themselves sorted.
- Args:
- {input}
- k (int): the k in "top-k"
- dim (int, optional): the dimension to sort along
- largest (bool, optional): controls whether to return largest or
- smallest elements
- sorted (bool, optional): controls whether to return the elements
- in sorted order
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
- optionally given to be used as output buffers
- Example::
- >>> x = torch.arange(1., 6.)
- >>> x
- tensor([ 1., 2., 3., 4., 5.])
- >>> torch.topk(x, 3)
- torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
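- >>> # with largest=False the k smallest elements are returned instead (illustrative)
- >>> torch.topk(x, 3, largest=False)
- torch.return_types.topk(values=tensor([1., 2., 3.]), indices=tensor([0, 1, 2]))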
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.trace,
- r"""
- trace(input) -> Tensor
- Returns the sum of the elements of the diagonal of the input 2-D matrix.
- Example::
- >>> x = torch.arange(1., 10.).view(3, 3)
- >>> x
- tensor([[ 1., 2., 3.],
- [ 4., 5., 6.],
- [ 7., 8., 9.]])
- >>> torch.trace(x)
- tensor(15.)
- """,
- )
- add_docstr(
- torch.transpose,
- r"""
- transpose(input, dim0, dim1) -> Tensor
- Returns a tensor that is a transposed version of :attr:`input`.
- The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
- If :attr:`input` is a strided tensor then the resulting :attr:`out`
- tensor shares its underlying storage with the :attr:`input` tensor, so
- changing the content of one would change the content of the other.
- If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
- resulting :attr:`out` tensor *does not* share the underlying storage
- with the :attr:`input` tensor.
- If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
- layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
- :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
- both be sparse dimensions. The batch dimensions of a sparse tensor are the
- dimensions preceding the sparse dimensions.
- .. note::
- Transpositions which interchange the sparse dimensions of a `SparseCSR`
- or `SparseCSC` layout tensor will result in the layout changing between
- the two options. Transposition of the sparse dimensions of a `SparseBSR`
- or `SparseBSC` layout tensor will likewise generate a result with the
- opposite layout.
- Args:
- {input}
- dim0 (int): the first dimension to be transposed
- dim1 (int): the second dimension to be transposed
- Example::
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 1.0028, -0.9893, 0.5809],
- [-0.1669, 0.7299, 0.4942]])
- >>> torch.transpose(x, 0, 1)
- tensor([[ 1.0028, -0.1669],
- [-0.9893, 0.7299],
- [ 0.5809, 0.4942]])
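- >>> # the result of a strided transpose is a view; writes through it are
- >>> # visible in the original tensor (illustrative)
- >>> y = torch.transpose(x, 0, 1)
- >>> y[0, 0] = 42.0
- >>> x[0, 0]
- tensor(42.)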
- See also :func:`torch.t`.
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.triangular_solve,
- r"""
- triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
- Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
- and multiple right-hand sides :math:`b`.
- In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
- (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
- `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
- batches of 2D matrices. If the inputs are batches, then returns
- batched outputs `X`.
- If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
- :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
- the result may contain `NaN` values.
- Supports input of float, double, cfloat and cdouble data types.
- .. warning::
- :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
- and will be removed in a future PyTorch release.
- :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
- copy of one of the inputs.
- ``X = torch.triangular_solve(B, A).solution`` should be replaced with
- .. code:: python
- X = torch.linalg.solve_triangular(A, B)
- Args:
- b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
- :math:`*` is zero or more batch dimensions
- A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
- where :math:`*` is zero or more batch dimensions
- upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
- transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
- and `op(A) = A` if it is ``False``. Default: ``False``.
- unitriangular (bool, optional): whether :math:`A` is unit triangular.
- If True, the diagonal elements of :math:`A` are assumed to be
- 1 and not referenced from :math:`A`. Default: ``False``.
- Keyword args:
- out ((Tensor, Tensor), optional): tuple of two tensors to write
- the output to. Ignored if `None`. Default: `None`.
- Returns:
- A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
- is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
- (or whatever variant of the system of equations, depending on the keyword arguments).
- Examples::
- >>> A = torch.randn(2, 2).triu()
- >>> A
- tensor([[ 1.1527, -1.0753],
- [ 0.0000, 0.7986]])
- >>> b = torch.randn(2, 3)
- >>> b
- tensor([[-0.0210, 2.3513, -1.5492],
- [ 1.5429, 0.7403, -1.0243]])
- >>> torch.triangular_solve(b, A)
- torch.return_types.triangular_solve(
- solution=tensor([[ 1.7841, 2.9046, -2.5405],
- [ 1.9320, 0.9270, -1.2826]]),
- cloned_coefficient=tensor([[ 1.1527, -1.0753],
- [ 0.0000, 0.7986]]))
- """,
- )
- add_docstr(
- torch.tril,
- r"""
- tril(input, diagonal=0, *, out=None) -> Tensor
- Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
- :attr:`input`; the other elements of the result tensor :attr:`out` are set to 0.
- The lower triangular part of the matrix is defined as the elements on and
- below the diagonal.
- The argument :attr:`diagonal` controls which diagonal to consider. If
- :attr:`diagonal` = 0, all elements on and below the main diagonal are
- retained. A positive value includes just as many diagonals above the main
- diagonal, and similarly a negative value excludes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
- :math:`d_{1}, d_{2}` are the dimensions of the matrix.
- """
- + r"""
- Args:
- {input}
- diagonal (int, optional): the diagonal to consider
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-1.0813, -0.8619, 0.7105],
- [ 0.0935, 0.1380, 2.2112],
- [-0.3409, -0.9828, 0.0289]])
- >>> torch.tril(a)
- tensor([[-1.0813, 0.0000, 0.0000],
- [ 0.0935, 0.1380, 0.0000],
- [-0.3409, -0.9828, 0.0289]])
- >>> b = torch.randn(4, 6)
- >>> b
- tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
- [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
- [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
- [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
- >>> torch.tril(b, diagonal=1)
- tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
- [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
- [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
- [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
- >>> torch.tril(b, diagonal=-1)
- tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
- [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
- [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
- [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
- """.format(
- **common_args
- ),
- )
- # docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
- # as common args.
- add_docstr(
- torch.tril_indices,
- r"""
- tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
- Returns the indices of the lower triangular part of a :attr:`row`-by-
- :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
- coordinates of all indices and the second row contains column coordinates.
- Indices are ordered based on rows and then columns.
- The lower triangular part of the matrix is defined as the elements on and
- below the diagonal.
- The argument :attr:`offset` controls which diagonal to consider. If
- :attr:`offset` = 0, all elements on and below the main diagonal are
- retained. A positive value includes just as many diagonals above the main
- diagonal, and similarly a negative value excludes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
- where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
- .. note::
- When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
- prevent overflow during calculation.
- """
- + r"""
- Args:
- row (``int``): number of rows in the 2-D matrix.
- col (``int``): number of columns in the 2-D matrix.
- offset (``int``): diagonal offset from the main diagonal.
- Default: if not provided, 0.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, ``torch.long``.
- {device}
- layout (:class:`torch.layout`, optional): currently only supports ``torch.strided``.
- Example::
- >>> a = torch.tril_indices(3, 3)
- >>> a
- tensor([[0, 1, 1, 2, 2, 2],
- [0, 0, 1, 0, 1, 2]])
- >>> a = torch.tril_indices(4, 3, -1)
- >>> a
- tensor([[1, 2, 2, 3, 3, 3],
- [0, 0, 1, 0, 1, 2]])
- >>> a = torch.tril_indices(4, 3, 1)
- >>> a
- tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
- [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.triu,
- r"""
- triu(input, diagonal=0, *, out=None) -> Tensor
- Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
- :attr:`input`; the other elements of the result tensor :attr:`out` are set to 0.
- The upper triangular part of the matrix is defined as the elements on and
- above the diagonal.
- The argument :attr:`diagonal` controls which diagonal to consider. If
- :attr:`diagonal` = 0, all elements on and above the main diagonal are
- retained. A positive value excludes just as many diagonals above the main
- diagonal, and similarly a negative value includes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
- :math:`d_{1}, d_{2}` are the dimensions of the matrix.
- """
- + r"""
- Args:
- {input}
- diagonal (int, optional): the diagonal to consider
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[ 0.2309, 0.5207, 2.0049],
- [ 0.2072, -1.0680, 0.6602],
- [ 0.3480, -0.5211, -0.4573]])
- >>> torch.triu(a)
- tensor([[ 0.2309, 0.5207, 2.0049],
- [ 0.0000, -1.0680, 0.6602],
- [ 0.0000, 0.0000, -0.4573]])
- >>> torch.triu(a, diagonal=1)
- tensor([[ 0.0000, 0.5207, 2.0049],
- [ 0.0000, 0.0000, 0.6602],
- [ 0.0000, 0.0000, 0.0000]])
- >>> torch.triu(a, diagonal=-1)
- tensor([[ 0.2309, 0.5207, 2.0049],
- [ 0.2072, -1.0680, 0.6602],
- [ 0.0000, -0.5211, -0.4573]])
- >>> b = torch.randn(4, 6)
- >>> b
- tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
- [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
- [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
- [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
- >>> torch.triu(b, diagonal=1)
- tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
- [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
- [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
- [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
- >>> torch.triu(b, diagonal=-1)
- tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
- [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
- [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
- [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
- """.format(
- **common_args
- ),
- )
- # docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
- # as common args.
- add_docstr(
- torch.triu_indices,
- r"""
- triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
- Returns the indices of the upper triangular part of a :attr:`row` by
- :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
- coordinates of all indices and the second row contains column coordinates.
- Indices are ordered based on rows and then columns.
- The upper triangular part of the matrix is defined as the elements on and
- above the diagonal.
- The argument :attr:`offset` controls which diagonal to consider. If
- :attr:`offset` = 0, all elements on and above the main diagonal are
- retained. A positive value excludes just as many diagonals above the main
- diagonal, and similarly a negative value includes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
- where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
- .. note::
- When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
- prevent overflow during calculation.
- """
- + r"""
- Args:
- row (``int``): number of rows in the 2-D matrix.
- col (``int``): number of columns in the 2-D matrix.
- offset (``int``): diagonal offset from the main diagonal.
- Default: if not provided, 0.
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, ``torch.long``.
- {device}
- layout (:class:`torch.layout`, optional): currently only supports ``torch.strided``.
- Example::
- >>> a = torch.triu_indices(3, 3)
- >>> a
- tensor([[0, 0, 0, 1, 1, 2],
- [0, 1, 2, 1, 2, 2]])
- >>> a = torch.triu_indices(4, 3, -1)
- >>> a
- tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
- [0, 1, 2, 0, 1, 2, 1, 2, 2]])
- >>> a = torch.triu_indices(4, 3, 1)
- >>> a
- tensor([[0, 0, 1],
- [1, 2, 2]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.true_divide,
- r"""
- true_divide(dividend, divisor, *, out) -> Tensor
- Alias for :func:`torch.div` with ``rounding_mode=None``.
- """,
- )
- add_docstr(
- torch.trunc,
- r"""
- trunc(input, *, out=None) -> Tensor
- Returns a new tensor with the truncated integer values of
- the elements of :attr:`input`.
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
- Args:
- {input}
- Keyword args:
- {out}
- Example::
- >>> a = torch.randn(4)
- >>> a
- tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
- >>> torch.trunc(a)
- tensor([ 3., 0., -0., -0.])
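- >>> # integer inputs are returned as a copy, per the array-api convention above
- >>> torch.trunc(torch.tensor([1, 2, 3]))
- tensor([1, 2, 3])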
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.fake_quantize_per_tensor_affine,
- r"""
- fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
- Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
- :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
- .. math::
- \text{output} = (
- \min(
- \text{quant\_max},
- \max(
- \text{quant\_min},
- \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
- )
- ) - \text{zero\_point}
- ) \times \text{scale}
- Args:
- input (Tensor): the input value(s), ``torch.float32`` tensor
- scale (double scalar or ``float32`` Tensor): quantization scale
- zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
- quant_min (int64): lower bound of the quantized domain
- quant_max (int64): upper bound of the quantized domain
- Returns:
- Tensor: A newly fake_quantized ``torch.float32`` tensor
- Example::
- >>> x = torch.randn(4)
- >>> x
- tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
- >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
- tensor([0.1000, 1.0000, 0.4000, 0.0000])
- >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
- tensor([0.1000, 1.0000, 0.4000, 0.0000])
- """,
- )
- add_docstr(
- torch.fake_quantize_per_channel_affine,
- r"""
- fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
- Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
- :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
- .. math::
- \text{output} = (
- \min(
- \text{quant\_max},
- \max(
- \text{quant\_min},
- \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
- )
- ) - \text{zero\_point}
- ) \times \text{scale}
- Args:
- input (Tensor): the input value(s), in ``torch.float32``
- scale (Tensor): quantization scale, per channel in ``torch.float32``
- zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
- axis (int32): channel axis
- quant_min (int64): lower bound of the quantized domain
- quant_max (int64): upper bound of the quantized domain
- Returns:
- Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
- Example::
- >>> x = torch.randn(2, 2, 2)
- >>> x
- tensor([[[-0.2525, -0.0466],
- [ 0.3491, -0.2168]],
- [[-0.5906, 1.6258],
- [ 0.6444, -0.0542]]])
- >>> scales = (torch.randn(2) + 1) * 0.05
- >>> scales
- tensor([0.0475, 0.0486])
- >>> zero_points = torch.zeros(2).to(torch.int32)
- >>> zero_points
- tensor([0, 0])
- >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
- tensor([[[0.0000, 0.0000],
- [0.3405, 0.0000]],
- [[0.0000, 1.6134],
- [0.6323, 0.0000]]])
- """,
- )
- add_docstr(
- torch.fix,
- r"""
- fix(input, *, out=None) -> Tensor
- Alias for :func:`torch.trunc`
- """,
- )
- add_docstr(
- torch.unsqueeze,
- r"""
- unsqueeze(input, dim) -> Tensor
- Returns a new tensor with a dimension of size one inserted at the
- specified position.
- The returned tensor shares the same underlying data with the :attr:`input` tensor.
- A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
- can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
- applied at :attr:`dim` = ``dim + input.dim() + 1``.
- Args:
- {input}
- dim (int): the index at which to insert the singleton dimension
- Example::
- >>> x = torch.tensor([1, 2, 3, 4])
- >>> torch.unsqueeze(x, 0)
- tensor([[ 1, 2, 3, 4]])
- >>> torch.unsqueeze(x, 1)
- tensor([[ 1],
- [ 2],
- [ 3],
- [ 4]])
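- >>> # a negative dim counts from the back: -1 here is equivalent to dim = 1
- >>> torch.unsqueeze(x, -1)
- tensor([[ 1],
- [ 2],
- [ 3],
- [ 4]])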
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.var,
- r"""
- var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
- Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
- can be a single dimension, list of dimensions, or ``None`` to reduce over all
- dimensions.
- The variance (:math:`\sigma^2`) is calculated as
- .. math:: \sigma^2 = \frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
- """
- + r"""
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- {keepdim}
- {out}
- Example::
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var(a, dim=1, keepdim=True)
- tensor([[1.0631],
- [0.5590],
- [1.4893],
- [0.8258]])
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.var_mean,
- r"""
- var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
- Calculates the variance and mean over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
- The variance (:math:`\sigma^2`) is calculated as
- .. math:: \sigma^2 = \frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
- """
- + r"""
- {keepdim_details}
- Args:
- {input}
- {opt_dim}
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- {keepdim}
- {out}
- Returns:
- A tuple (var, mean) containing the variance and mean.
- Example::
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var_mean(a, dim=0, keepdim=True)
- (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """.format(
- **multi_dim_common
- ),
- )
- add_docstr(
- torch.zeros,
- r"""
- zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Returns a tensor filled with the scalar value `0`, with the shape defined
- by the variable argument :attr:`size`.
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
- Keyword args:
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.zeros(2, 3)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
- >>> torch.zeros(5)
- tensor([ 0., 0., 0., 0., 0.])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.zeros_like,
- r"""
- zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
- Returns a tensor filled with the scalar value `0`, with the same size as
- :attr:`input`. ``torch.zeros_like(input)`` is equivalent to
- ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
- .. warning::
- As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
- the old ``torch.zeros_like(input, out=output)`` is equivalent to
- ``torch.zeros(input.size(), out=output)``.
- Args:
- {input}
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
- Example::
- >>> input = torch.empty(2, 3)
- >>> torch.zeros_like(input)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.empty,
- """
- empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
- memory_format=torch.contiguous_format) -> Tensor
- Returns a tensor filled with uninitialized data. The shape of the tensor is
- defined by the variable argument :attr:`size`.
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
- Keyword args:
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {pin_memory}
- {memory_format}
- Example::
- >>> torch.empty((2,3), dtype=torch.int64)
- tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
- [ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.empty_like,
- r"""
- empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
- Returns an uninitialized tensor with the same size as :attr:`input`.
- ``torch.empty_like(input)`` is equivalent to
- ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
- Args:
- {input}
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
- Example::
- >>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
- >>> torch.empty_like(a)
- tensor([[0, 0, 0],
- [0, 0, 0]], device='cuda:0', dtype=torch.int32)
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.empty_strided,
- r"""
- empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
- Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
- .. warning::
- If the constructed tensor is "overlapped" (with multiple indices referring to the same element
- in memory) its behavior is undefined.
- Args:
- size (tuple of int): the shape of the output tensor
- stride (tuple of int): the strides of the output tensor
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {pin_memory}
- Example::
- >>> a = torch.empty_strided((2, 3), (1, 2))
- >>> a
- tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
- [0.0000e+00, 0.0000e+00, 3.0705e-41]])
- >>> a.stride()
- (1, 2)
- >>> a.size()
- torch.Size([2, 3])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.full,
- r"""
- full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
- Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
- tensor's dtype is inferred from :attr:`fill_value`.
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
- fill_value (Scalar): the value to fill the output tensor with.
- Keyword args:
- {out}
- {dtype}
- {layout}
- {device}
- {requires_grad}
- Example::
- >>> torch.full((2, 3), 3.141592)
- tensor([[ 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.full_like,
- """
- full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
- memory_format=torch.preserve_format) -> Tensor
- Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
- ``torch.full_like(input, fill_value)`` is equivalent to
- ``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
- Args:
- {input}
- fill_value: the number to fill the output tensor with.
- Keyword args:
- {dtype}
- {layout}
- {device}
- {requires_grad}
- {memory_format}
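- Example::
- >>> input = torch.full((2, 3), 1.5)
- >>> torch.full_like(input, 2.)
- tensor([[2., 2., 2.],
- [2., 2., 2.]])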
- """.format(
- **factory_like_common_args
- ),
- )
- add_docstr(
- torch.det,
- r"""
- det(input) -> Tensor
- Alias for :func:`torch.linalg.det`
- """,
- )
- add_docstr(
- torch.where,
- r"""
- where(condition, input, other, *, out=None) -> Tensor
- Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
- The operation is defined as:
- .. math::
- \text{out}_i = \begin{cases}
- \text{input}_i & \text{if } \text{condition}_i \\
- \text{other}_i & \text{otherwise} \\
- \end{cases}
- """
- + r"""
- .. note::
- The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
- Arguments:
- condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
- input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
- where :attr:`condition` is ``True``
- other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
- where :attr:`condition` is ``False``
- Keyword args:
- {out}
- Returns:
- Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
- Example::
- >>> x = torch.randn(3, 2)
- >>> y = torch.ones(3, 2)
- >>> x
- tensor([[-0.4620, 0.3139],
- [ 0.3898, -0.7197],
- [ 0.0478, -0.1657]])
- >>> torch.where(x > 0, 1.0, 0.0)
- tensor([[0., 1.],
- [1., 0.],
- [1., 0.]])
- >>> torch.where(x > 0, x, y)
- tensor([[ 1.0000, 0.3139],
- [ 0.3898, 1.0000],
- [ 0.0478, 1.0000]])
- >>> x = torch.randn(2, 2, dtype=torch.double)
- >>> x
- tensor([[ 1.0779, 0.0383],
- [-0.8785, -1.1089]], dtype=torch.float64)
- >>> torch.where(x > 0, x, 0.)
- tensor([[1.0779, 0.0383],
- [0.0000, 0.0000]], dtype=torch.float64)
- .. function:: where(condition) -> tuple of LongTensor
- :noindex:
- ``torch.where(condition)`` is identical to
- ``torch.nonzero(condition, as_tuple=True)``.
- .. note::
- See also :func:`torch.nonzero`.
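- Example::
- >>> # A small illustration of the single-argument form.
- >>> torch.where(torch.tensor([False, True, True]))
- (tensor([1, 2]),)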
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.logdet,
- r"""
- logdet(input) -> Tensor
- Calculates log determinant of a square matrix or batches of square matrices.
- It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
- a negative determinant.
- .. note::
- Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
- is not invertible. In this case, double backward through :meth:`logdet` will
- be unstable when :attr:`input` doesn't have distinct singular values. See
- :func:`torch.linalg.svd` for details.
- .. seealso::
- :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
- absolute value of the determinant of real-valued (resp. complex) square matrices.
- Arguments:
- input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
- batch dimensions.
- Example::
- >>> A = torch.randn(3, 3)
- >>> torch.det(A)
- tensor(0.2611)
- >>> torch.logdet(A)
- tensor(-1.3430)
- >>> A = torch.randn(3, 2, 2)
- >>> A
- tensor([[[ 0.9254, -0.6213],
- [-0.5787, 1.6843]],
- [[ 0.3242, -0.9665],
- [ 0.4539, -0.0887]],
- [[ 1.1336, -0.4025],
- [-0.7089, 0.9032]]])
- >>> A.det()
- tensor([1.1990, 0.4099, 0.7386])
- >>> A.det().log()
- tensor([ 0.1815, -0.8917, -0.3031])
- """,
- )
- add_docstr(
- torch.slogdet,
- r"""
- slogdet(input) -> (Tensor, Tensor)
- Alias for :func:`torch.linalg.slogdet`
- """,
- )
- add_docstr(
- torch.pinverse,
- r"""
- pinverse(input, rcond=1e-15) -> Tensor
- Alias for :func:`torch.linalg.pinv`
- """,
- )
- add_docstr(
- torch.hann_window,
- """
- hann_window(window_length, periodic=True, *, dtype=None, \
- layout=torch.strided, device=None, requires_grad=False) -> Tensor
- """
- + r"""
- Hann window function.
- .. math::
- w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
- \sin^2 \left( \frac{\pi n}{N - 1} \right),
- where :math:`N` is the full window size.
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is True, the :math:`N` in
- the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hann_window(L, periodic=True)`` equal to
- ``torch.hann_window(L + 1, periodic=False)[:-1]``.
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
- """
- + r"""
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- Keyword args:
- {dtype} Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- {device}
- {requires_grad}
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
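- Example::
- >>> # A brief illustration; by the formula above, the symmetric 5-point window
- >>> # takes the exact values below (up to floating-point rounding).
- >>> torch.hann_window(5, periodic=False)
- tensor([0.0000, 0.5000, 1.0000, 0.5000, 0.0000])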
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.hamming_window,
- """
- hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
- layout=torch.strided, device=None, requires_grad=False) -> Tensor
- """
- + r"""
- Hamming window function.
- .. math::
- w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
- where :math:`N` is the full window size.
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is True, the :math:`N` in
- the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hamming_window(L, periodic=True)`` equal to
- ``torch.hamming_window(L + 1, periodic=False)[:-1]``.
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
- .. note::
- This is a generalized version of :meth:`torch.hann_window`.
- """
- + r"""
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- alpha (float, optional): The coefficient :math:`\alpha` in the equation above
- beta (float, optional): The coefficient :math:`\beta` in the equation above
- Keyword args:
- {dtype} Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- {device}
- {requires_grad}
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window.
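- Example::
- >>> # A brief illustration with the default alpha=0.54, beta=0.46; values follow
- >>> # the formula above (up to floating-point rounding).
- >>> torch.hamming_window(5, periodic=False)
- tensor([0.0800, 0.5400, 1.0000, 0.5400, 0.0800])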
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.bartlett_window,
- """
- bartlett_window(window_length, periodic=True, *, dtype=None, \
- layout=torch.strided, device=None, requires_grad=False) -> Tensor
- """
- + r"""
- Bartlett window function.
- .. math::
- w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
- \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
- 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
- \end{cases},
- where :math:`N` is the full window size.
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is True, the :math:`N` in
- the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.bartlett_window(L, periodic=True)`` equal to
- ``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
- """
- + r"""
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- Keyword args:
- {dtype} Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- {device}
- {requires_grad}
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
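- Example::
- >>> # A brief illustration; the symmetric 5-point window is an exact triangle.
- >>> torch.bartlett_window(5, periodic=False)
- tensor([0.0000, 0.5000, 1.0000, 0.5000, 0.0000])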
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.blackman_window,
- """
- blackman_window(window_length, periodic=True, *, dtype=None, \
- layout=torch.strided, device=None, requires_grad=False) -> Tensor
- """
- + r"""
- Blackman window function.
- .. math::
- w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right),
- where :math:`N` is the full window size.
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is True, the :math:`N` in
- the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.blackman_window(L, periodic=True)`` equal to
- ``torch.blackman_window(L + 1, periodic=False)[:-1]``.
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
- """
- + r"""
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- Keyword args:
- {dtype} Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- {device}
- {requires_grad}
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
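- Example::
- >>> # A brief sketch of the periodic/symmetric identity stated above; assuming
- >>> # the periodic window is computed by trimming the longer symmetric one.
- >>> torch.equal(torch.blackman_window(5, periodic=True),
- ... torch.blackman_window(6, periodic=False)[:-1])
- True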
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.kaiser_window,
- """
- kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
- layout=torch.strided, device=None, requires_grad=False) -> Tensor
- """
- + r"""
- Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
- Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
- ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
- where ``L`` is the :attr:`window_length`. This function computes:
- .. math::
- out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
- Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
- ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
- The :attr:`periodic` argument is intended as a helpful shorthand
- to produce a periodic window as input to functions like :func:`torch.stft`.
- .. note::
- If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
- """
- + r"""
- Args:
- window_length (int): length of the window.
- periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
- If False, returns a symmetric window suitable for use in filter design.
- beta (float, optional): shape parameter for the window.
- Keyword args:
- {dtype}
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- {device}
- {requires_grad}
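- Example::
- >>> # A brief sketch; per the note above, a length-1 window is a single one,
- >>> # and the periodic form matches the trimmed symmetric form.
- >>> torch.kaiser_window(1)
- tensor([1.])
- >>> torch.equal(torch.kaiser_window(5, periodic=True),
- ... torch.kaiser_window(6, periodic=False)[:-1])
- True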
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.vander,
- """
- vander(x, N=None, increasing=False) -> Tensor
- """
- + r"""
- Generates a Vandermonde matrix.
- The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
- If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
- matrix with a geometric progression in each row is named for Alexandre-Théophile Vandermonde.
- Arguments:
- x (Tensor): 1-D input tensor.
- N (int, optional): Number of columns in the output. If N is not specified,
- a square array is returned :math:`(N = len(x))`.
- increasing (bool, optional): Order of the powers of the columns. If True,
- the powers increase from left to right, if False (the default) they are reversed.
- Returns:
- Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
- the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
- are :math:`x^0, x^1, ..., x^{{(N-1)}}`.
- Example::
- >>> x = torch.tensor([1, 2, 3, 5])
- >>> torch.vander(x)
- tensor([[ 1, 1, 1, 1],
- [ 8, 4, 2, 1],
- [ 27, 9, 3, 1],
- [125, 25, 5, 1]])
- >>> torch.vander(x, N=3)
- tensor([[ 1, 1, 1],
- [ 4, 2, 1],
- [ 9, 3, 1],
- [25, 5, 1]])
- >>> torch.vander(x, N=3, increasing=True)
- tensor([[ 1, 1, 1],
- [ 1, 2, 4],
- [ 1, 3, 9],
- [ 1, 5, 25]])
- """.format(
- **factory_common_args
- ),
- )
- add_docstr(
- torch.unbind,
- r"""
- unbind(input, dim=0) -> seq
- Removes a tensor dimension.
- Returns a tuple of all slices along the given dimension, with that dimension removed.
- Arguments:
- input (Tensor): the tensor to unbind
- dim (int): dimension to remove
- Example::
- >>> torch.unbind(torch.tensor([[1, 2, 3],
- ... [4, 5, 6],
- ... [7, 8, 9]]))
- (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
- """,
- )
- add_docstr(
- torch.combinations,
- r"""
- combinations(input, r=2, with_replacement=False) -> seq
- Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
- python's `itertools.combinations` when `with_replacement` is set to `False`, and
- `itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
- Arguments:
- input (Tensor): 1D vector.
- r (int, optional): number of elements to combine
- with_replacement (bool, optional): whether to allow duplication in combination
- Returns:
- Tensor: A tensor equivalent to converting the input tensor into a list,
- performing `itertools.combinations` or `itertools.combinations_with_replacement` on that
- list, and finally converting the resulting list into a tensor.
- Example::
- >>> import itertools
- >>> a = [1, 2, 3]
- >>> list(itertools.combinations(a, r=2))
- [(1, 2), (1, 3), (2, 3)]
- >>> list(itertools.combinations(a, r=3))
- [(1, 2, 3)]
- >>> list(itertools.combinations_with_replacement(a, r=2))
- [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
- >>> tensor_a = torch.tensor(a)
- >>> torch.combinations(tensor_a)
- tensor([[1, 2],
- [1, 3],
- [2, 3]])
- >>> torch.combinations(tensor_a, r=3)
- tensor([[1, 2, 3]])
- >>> torch.combinations(tensor_a, with_replacement=True)
- tensor([[1, 1],
- [1, 2],
- [1, 3],
- [2, 2],
- [2, 3],
- [3, 3]])
- """,
- )
- add_docstr(
- torch.trapezoid,
- r"""
- trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
- Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
- :attr:`dim`. By default the spacing between elements is assumed to be 1, but
- :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
- used to specify arbitrary spacing along :attr:`dim`.
- Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
- the default computation is
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
- \end{aligned}
- When :attr:`dx` is specified the computation becomes
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
- \end{aligned}
- effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
- assuming :attr:`x` is also a one-dimensional tensor with
- elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
- \end{aligned}
- When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
- The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
- and :attr:`y`, the function computes the difference between consecutive elements along
- dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
- the same shape as the original tensors, except that their lengths along the dimension :attr:`dim` are reduced by 1.
- After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
- See the examples below for details.
- .. note::
- The trapezoidal rule is a technique for approximating the definite integral of a function
- by averaging its left and right Riemann sums. The approximation becomes more accurate as
- the resolution of the partition increases.
- Arguments:
- y (Tensor): Values to use when computing the trapezoidal rule.
- x (Tensor): If specified, defines spacing between values as specified above.
- Keyword arguments:
- dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
- is specified then this defaults to 1. Effectively multiplies the result by its value.
- dim (int): The dimension along which to compute the trapezoidal rule.
- The last (inner-most) dimension by default.
- Examples::
- >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
- >>> y = torch.tensor([1, 5, 10])
- >>> torch.trapezoid(y)
- tensor(10.5)
- >>> # Computes the same trapezoidal rule directly to verify
- >>> (1 + 10 + 10) / 2
- 10.5
- >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
- >>> # NOTE: the result is the same as before, but multiplied by 2
- >>> torch.trapezoid(y, dx=2)
- tensor(21.0)
- >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.trapezoid(y, x)
- tensor(28.5)
- >>> # Computes the same trapezoidal rule directly to verify
- >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
- 28.5
- >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
- >>> y = torch.arange(9).reshape(3, 3)
- >>> y
- tensor([[0, 1, 2],
- [3, 4, 5],
- [6, 7, 8]])
- >>> torch.trapezoid(y)
- tensor([ 2., 8., 14.])
- >>> # Computes the trapezoidal rule for each column of the matrix
- >>> torch.trapezoid(y, dim=0)
- tensor([ 6., 8., 10.])
- >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with the same arbitrary spacing
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.trapezoid(y, x)
- tensor([5., 5., 5.])
- >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with different arbitrary spacing per row
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
- >>> torch.trapezoid(y, x)
- tensor([2., 4., 6.])
- """,
- )
- add_docstr(
- torch.trapz,
- r"""
- trapz(y, x, *, dim=-1) -> Tensor
- Alias for :func:`torch.trapezoid`.
- """,
- )
- add_docstr(
- torch.cumulative_trapezoid,
- r"""
- cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
- Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
- along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
- :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
- used to specify arbitrary spacing along :attr:`dim`.
- For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
- and this function is that :func:`torch.trapezoid` returns a value for each integration,
- whereas this function returns a cumulative value for every spacing within the integration. This
- is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
- Arguments:
- y (Tensor): Values to use when computing the trapezoidal rule.
- x (Tensor): If specified, defines spacing between values as specified above.
- Keyword arguments:
- dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
- is specified then this defaults to 1. Effectively multiplies the result by its value.
- dim (int): The dimension along which to compute the trapezoidal rule.
- The last (inner-most) dimension by default.
- Examples::
- >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
- >>> y = torch.tensor([1, 5, 10])
- >>> torch.cumulative_trapezoid(y)
- tensor([3., 10.5])
- >>> # Computes the same trapezoidal rule directly up to each element to verify
- >>> (1 + 5) / 2
- 3.0
- >>> (1 + 10 + 10) / 2
- 10.5
- >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
- >>> # NOTE: the result is the same as before, but multiplied by 2
- >>> torch.cumulative_trapezoid(y, dx=2)
- tensor([6., 21.])
- >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([6., 28.5])
- >>> # Computes the same trapezoidal rule directly up to each element to verify
- >>> ((3 - 1) * (1 + 5)) / 2
- 6.0
- >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
- 28.5
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
- >>> y = torch.arange(9).reshape(3, 3)
- >>> y
- tensor([[0, 1, 2],
- [3, 4, 5],
- [6, 7, 8]])
- >>> torch.cumulative_trapezoid(y)
- tensor([[ 0.5, 2.],
- [ 3.5, 8.],
- [ 6.5, 14.]])
- >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
- >>> torch.cumulative_trapezoid(y, dim=0)
- tensor([[ 1.5, 2.5, 3.5],
- [ 6.0, 8.0, 10.0]])
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with the same arbitrary spacing
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([[2., 5.],
- [2., 5.],
- [2., 5.]])
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with different arbitrary spacing per row
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([[1., 2.],
- [2., 4.],
- [3., 6.]])
- """,
- )
- add_docstr(
- torch.repeat_interleave,
- r"""
- repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
- Repeat elements of a tensor.
- .. warning::
- This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
- Args:
- {input}
- repeats (Tensor or int): The number of repetitions for each element.
- repeats is broadcasted to fit the shape of the given axis.
- dim (int, optional): The dimension along which to repeat values.
- By default, use the flattened input array, and return a flat output
- array.
- Keyword args:
- output_size (int, optional): Total output size for the given axis
- (e.g. sum of repeats). If given, it avoids the stream synchronization
- needed to calculate the output shape of the tensor.
- Returns:
- Tensor: Repeated tensor which has the same shape as input, except along the given axis.
- Example::
- >>> x = torch.tensor([1, 2, 3])
- >>> x.repeat_interleave(2)
- tensor([1, 1, 2, 2, 3, 3])
- >>> y = torch.tensor([[1, 2], [3, 4]])
- >>> torch.repeat_interleave(y, 2)
- tensor([1, 1, 2, 2, 3, 3, 4, 4])
- >>> torch.repeat_interleave(y, 3, dim=1)
- tensor([[1, 1, 1, 2, 2, 2],
- [3, 3, 3, 4, 4, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
- .. function:: repeat_interleave(repeats, *, output_size=None) -> Tensor
- :noindex:
- If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
- `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
- `1` appears `n2` times, `2` appears `n3` times, etc.
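- Example::
- >>> # A small illustration of the repeats-only overload described above.
- >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
- tensor([0, 1, 1, 2, 2, 2])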
- """.format(
- **common_args
- ),
- )
- add_docstr(
- torch.tile,
- r"""
- tile(input, dims) -> Tensor
- Constructs a tensor by repeating the elements of :attr:`input`.
- The :attr:`dims` argument specifies the number of repetitions
- in each dimension.
- If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
- ones are prepended to :attr:`dims` until all dimensions are specified.
- For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
- is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
- Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
- specifies, then :attr:`input` is treated as if it were unsqueezed at
- dimension zero until it has as many dimensions as :attr:`dims` specifies.
- For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
- is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
- shape (1, 1, 4, 2).
- .. note::
- This function is similar to NumPy's tile function.
- Args:
- input (Tensor): the tensor whose elements to repeat.
- dims (tuple): the number of repetitions per dimension.
- Example::
- >>> x = torch.tensor([1, 2, 3])
- >>> x.tile((2,))
- tensor([1, 2, 3, 1, 2, 3])
- >>> y = torch.tensor([[1, 2], [3, 4]])
- >>> torch.tile(y, (2, 2))
- tensor([[1, 2, 1, 2],
- [3, 4, 3, 4],
- [1, 2, 1, 2],
- [3, 4, 3, 4]])
- """,
- )
- add_docstr(
- torch.quantize_per_tensor,
- r"""
- quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
- Converts a float tensor to a quantized tensor with given scale and zero point.
- Arguments:
- input (Tensor): float tensor or list of tensors to quantize
- scale (float or Tensor): scale to apply in quantization formula
- zero_point (int or Tensor): offset in integer value that maps to float zero
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
- Returns:
- Tensor: A newly quantized tensor or list of quantized tensors.
- Example::
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
- tensor([ 0, 10, 20, 30], dtype=torch.uint8)
- >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
- ... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
- (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
- tensor([-2., 2.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
- """,
- )
- add_docstr(
- torch.quantize_per_tensor_dynamic,
- r"""
- quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor
- Converts a float tensor to a quantized tensor with scale and zero_point calculated
- dynamically based on the input.
- Arguments:
- input (Tensor): float tensor or list of tensors to quantize
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
- reduce_range (bool): a flag to indicate whether to reduce the range of quantized
- data by 1 bit; this is required to avoid instruction overflow on some hardware
- Returns:
- Tensor: A newly (dynamically) quantized tensor
- Example::
- >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
- >>> print(t)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
- zero_point=85)
- >>> t.int_repr()
- tensor([ 0, 85, 170, 255], dtype=torch.uint8)
- """,
- )
- add_docstr(
- torch.quantize_per_channel,
- r"""
- quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
- Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
- Arguments:
- input (Tensor): float tensor to quantize
- scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
- zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
- axis (int): dimension on which to apply per-channel quantization
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
- Returns:
- Tensor: A newly quantized tensor
- Example::
- >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
- >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
- tensor([[-1., 0.],
- [ 1., 2.]], size=(2, 2), dtype=torch.quint8,
- quantization_scheme=torch.per_channel_affine,
- scale=tensor([0.1000, 0.0100], dtype=torch.float64),
- zero_point=tensor([10, 0]), axis=0)
- >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
- tensor([[ 0, 10],
- [100, 200]], dtype=torch.uint8)
- """,
- )
- add_docstr(
- torch.quantized_batch_norm,
- r"""
- quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
- Applies batch normalization on a 4D (NCHW) quantized tensor.
- .. math::
- y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
- Arguments:
- input (Tensor): quantized tensor
- weight (Tensor): float tensor that corresponds to the gamma, size C
- bias (Tensor): float tensor that corresponds to the beta, size C
- mean (Tensor): float mean value in batch normalization, size C
- var (Tensor): float tensor for variance, size C
- eps (float): a value added to the denominator for numerical stability.
- output_scale (float): output quantized tensor scale
- output_zero_point (int): output quantized tensor zero_point
- Returns:
- Tensor: A quantized tensor with batch normalization applied.
- Example::
- >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
- >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
- tensor([[[[-0.2000, -0.2000],
- [ 1.6000, -0.2000]],
- [[-0.4000, -0.4000],
- [-0.4000, 0.6000]]],
- [[[-0.2000, -0.2000],
- [-0.2000, -0.2000]],
- [[ 0.6000, -0.4000],
- [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
- """,
- )
- add_docstr(
- torch.quantized_max_pool1d,
- r"""
- quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
- Applies a 1D max pooling over an input quantized tensor composed of several input planes.
- Arguments:
- input (Tensor): quantized tensor
- kernel_size (list of int): the size of the sliding window
- stride (``list of int``, optional): the stride of the sliding window
- padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
- dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
- ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
- Defaults to False.
- Returns:
- Tensor: A quantized tensor with max_pool1d applied.
- Example::
- >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
- >>> torch.quantized_max_pool1d(qx, [2])
- tensor([[0.0000],
- [1.5000]], size=(2, 1), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
- """,
- )
- add_docstr(
- torch.quantized_max_pool2d,
- r"""
- quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
- Applies a 2D max pooling over an input quantized tensor composed of several input planes.
- Arguments:
- input (Tensor): quantized tensor
- kernel_size (``list of int``): the size of the sliding window
- stride (``list of int``, optional): the stride of the sliding window
- padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
- dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
- ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
- Defaults to False.
- Returns:
- Tensor: A quantized tensor with max_pool2d applied.
- Example::
- >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
- >>> torch.quantized_max_pool2d(qx, [2,2])
- tensor([[[[1.5000]],
- [[1.5000]]],
- [[[0.0000]],
- [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
- """,
- )
- add_docstr(
- torch.Generator,
- r"""
- Generator(device='cpu') -> Generator
- Creates and returns a generator object that manages the state of the algorithm which
- produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
- functions.
- Arguments:
- device (:class:`torch.device`, optional): the desired device for the generator.
- Returns:
- Generator: A torch.Generator object.
- Example::
- >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
- >>> g_cpu = torch.Generator()
- >>> g_cuda = torch.Generator(device='cuda')
- """,
- )
- add_docstr(
- torch.Generator.set_state,
- r"""
- Generator.set_state(new_state) -> void
- Sets the Generator state.
- Arguments:
- new_state (torch.ByteTensor): The desired state.
- Example::
- >>> g_cpu = torch.Generator()
- >>> g_cpu_other = torch.Generator()
- >>> g_cpu.set_state(g_cpu_other.get_state())
- """,
- )
- add_docstr(
- torch.Generator.get_state,
- r"""
- Generator.get_state() -> Tensor
- Returns the Generator state as a ``torch.ByteTensor``.
- Returns:
- Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
- to restore a Generator to a specific point in time.
- Example::
- >>> g_cpu = torch.Generator()
- >>> g_cpu.get_state()
- """,
- )
- add_docstr(
- torch.Generator.manual_seed,
- r"""
- Generator.manual_seed(seed) -> Generator
- Sets the seed for generating random numbers. Returns a `torch.Generator` object.
- It is recommended to set a large seed, i.e. a number that has a good balance of 0
- and 1 bits. Avoid having many 0 bits in the seed.
- Arguments:
- seed (int): The desired seed. Value must be within the inclusive range
- `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
- is raised. Negative inputs are remapped to positive values with the formula
- `0xffff_ffff_ffff_ffff + seed`.
- Returns:
- Generator: A torch.Generator object.
- Example::
- >>> g_cpu = torch.Generator()
- >>> g_cpu.manual_seed(2147483647)
- """,
- )
- add_docstr(
- torch.Generator.initial_seed,
- r"""
- Generator.initial_seed() -> int
- Returns the initial seed for generating random numbers.
- Example::
- >>> g_cpu = torch.Generator()
- >>> g_cpu.initial_seed()
- 2147483647
- """,
- )
- add_docstr(
- torch.Generator.seed,
- r"""
- Generator.seed() -> int
- Gets a non-deterministic random number from std::random_device or the current
- time and uses it to seed a Generator.
- Example::
- >>> g_cpu = torch.Generator()
- >>> g_cpu.seed()
- 1516516984916
- """,
- )
- add_docstr(
- torch.Generator.device,
- r"""
- Generator.device -> device
- Gets the current device of the generator.
- Example::
- >>> g_cpu = torch.Generator()
- >>> g_cpu.device
- device(type='cpu')
- """,
- )
- add_docstr(
- torch._assert_async,
- r"""
- _assert_async(tensor) -> void
- Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
- this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
- CUDA tensors, we DO NOT synchronize and you may only find out the assertion
- failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
- testing invariants in CUDA tensors without giving up performance. This function
- is NOT intended to be used for regular error checking, as it will trash your CUDA
- context if the assert fails (forcing you to restart your PyTorch process).
- Args:
- tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
- elements (including False for boolean tensors) cause an assertion failure
- to be raised.
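- Example::
- >>> # A minimal illustration; a nonzero scalar passes silently, while a zero
- >>> # scalar raises (possibly asynchronously on CUDA).
- >>> torch._assert_async(torch.tensor(1))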
- """,
- )
- add_docstr(
- torch.searchsorted,
- r"""
- searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor
- Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
- corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
- of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
- Return a new tensor with the same size as :attr:`values`. If :attr:`right` is False or :attr:`side` is
- ``'left'`` (the default), then the left boundary of :attr:`sorted_sequence` is closed. More formally,
- the returned index satisfies the following rules:
- .. list-table::
- :widths: 12 10 78
- :header-rows: 1
- * - :attr:`sorted_sequence`
- - :attr:`right`
- - *returned index satisfies*
- * - 1-D
- - False
- - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
- * - 1-D
- - True
- - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
- * - N-D
- - False
- - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
- * - N-D
- - True
- - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
- Args:
- sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
- dimension unless :attr:`sorter` is provided, in which case the sequence does not
- need to be sorted
- values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
- Keyword args:
- out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
- Default value is False, i.e. default output data type is torch.int64.
- right (bool, optional): if False, return the first suitable location that is found. If True, return the
- last such index. If no suitable index is found, return 0 for non-numerical values
- (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
- (one past the last index of the *innermost* dimension). In other words, if False,
- gets the lower bound index for each value in :attr:`values` on the corresponding
- *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
- bound index instead. Default value is False. :attr:`side` does the same and is
- preferred. It will error if :attr:`side` is set to "left" while this is True.
- side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
- and "right" corresponds to True for :attr:`right`. It will error if this is set to
- "left" while :attr:`right` is True.
- out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
- sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
- :attr:`sorted_sequence` containing a sequence of indices that sort it in the
- ascending order on the innermost dimension
- Example::
- >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
- >>> sorted_sequence
- tensor([[ 1, 3, 5, 7, 9],
- [ 2, 4, 6, 8, 10]])
- >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
- >>> values
- tensor([[3, 6, 9],
- [3, 6, 9]])
- >>> torch.searchsorted(sorted_sequence, values)
- tensor([[1, 3, 4],
- [1, 2, 4]])
- >>> torch.searchsorted(sorted_sequence, values, side='right')
- tensor([[2, 3, 5],
- [1, 3, 4]])
- >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
- >>> sorted_sequence_1d
- tensor([1, 3, 5, 7, 9])
- >>> torch.searchsorted(sorted_sequence_1d, values)
- tensor([[1, 3, 4],
- [1, 3, 4]])
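- >>> # A sketch of :attr:`sorter` with an unsorted sequence; returned indices
- >>> # refer to positions in the sorted order.
- >>> unsorted = torch.tensor([5, 1, 9, 3, 7])
- >>> torch.searchsorted(unsorted, torch.tensor([4]), sorter=torch.argsort(unsorted))
- tensor([2])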
- """,
- )
- add_docstr(
- torch.bucketize,
- r"""
- bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
- Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
- boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
- as :attr:`input`. If :attr:`right` is False (default), then the left boundary is closed. More
- formally, the returned index satisfies the following rules:
- .. list-table::
- :widths: 15 85
- :header-rows: 1
- * - :attr:`right`
- - *returned index satisfies*
- * - False
- - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
- * - True
- - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
- Args:
- input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
- boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
- Keyword args:
- out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
- Default value is False, i.e. default output data type is torch.int64.
- right (bool, optional): if False, return the first suitable location that is found. If True, return the
- last such index. If no suitable index is found, return 0 for non-numerical values
- (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
- In other words, if False, gets the lower bound index for each value in :attr:`input`
- from :attr:`boundaries`. If True, gets the upper bound index instead.
- Default value is False.
- out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
- Example::
- >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
- >>> boundaries
- tensor([1, 3, 5, 7, 9])
- >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
- >>> v
- tensor([[3, 6, 9],
- [3, 6, 9]])
- >>> torch.bucketize(v, boundaries)
- tensor([[1, 3, 4],
- [1, 3, 4]])
- >>> torch.bucketize(v, boundaries, right=True)
- tensor([[2, 3, 5],
- [2, 3, 5]])
- """,
- )
- add_docstr(
- torch.view_as_real_copy,
- r"""
- Performs the same operation as :func:`torch.view_as_real`, but all output tensors
- are freshly created instead of aliasing the input.
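- Example::
- >>> # A small illustration; unlike :func:`torch.view_as_real`, the result does
- >>> # not share memory with the input.
- >>> x = torch.tensor([1 + 2j])
- >>> y = torch.view_as_real_copy(x)
- >>> y
- tensor([[1., 2.]])
- >>> y.data_ptr() == x.data_ptr()
- False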
- """,
- )
- add_docstr(
- torch.view_as_complex_copy,
- r"""
- Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.as_strided_copy,
- r"""
- Performs the same operation as :func:`torch.as_strided`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.diagonal_copy,
- r"""
- Performs the same operation as :func:`torch.diagonal`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.expand_copy,
- r"""
- Performs the same operation as :func:`torch.expand`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.permute_copy,
- r"""
- Performs the same operation as :func:`torch.permute`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.select_copy,
- r"""
- Performs the same operation as :func:`torch.select`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.detach_copy,
- r"""
- Performs the same operation as :func:`torch.detach`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.slice_copy,
- r"""
- Performs the same operation as :func:`torch.slice`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.split_copy,
- r"""
- Performs the same operation as :func:`torch.split`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.split_with_sizes_copy,
- r"""
- Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.squeeze_copy,
- r"""
- Performs the same operation as :func:`torch.squeeze`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.t_copy,
- r"""
- Performs the same operation as :func:`torch.t`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.transpose_copy,
- r"""
- Performs the same operation as :func:`torch.transpose`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.unsqueeze_copy,
- r"""
- Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.indices_copy,
- r"""
- Performs the same operation as :func:`torch.indices`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.values_copy,
- r"""
- Performs the same operation as :func:`torch.values`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.crow_indices_copy,
- r"""
- Performs the same operation as :func:`torch.crow_indices`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.col_indices_copy,
- r"""
- Performs the same operation as :func:`torch.col_indices`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.unbind_copy,
- r"""
- Performs the same operation as :func:`torch.unbind`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.view_copy,
- r"""
- Performs the same operation as :func:`torch.view`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.unfold_copy,
- r"""
- Performs the same operation as :func:`torch.unfold`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- add_docstr(
- torch.alias_copy,
- r"""
- Performs the same operation as :func:`torch.alias`, but all output tensors
- are freshly created instead of aliasing the input.
- """,
- )
- for unary_base_func_name in (
- "exp",
- "sqrt",
- "abs",
- "acos",
- "asin",
- "atan",
- "ceil",
- "cos",
- "cosh",
- "erf",
- "erfc",
- "expm1",
- "floor",
- "log",
- "log10",
- "log1p",
- "log2",
- "neg",
- "tan",
- "tanh",
- "sin",
- "sinh",
- "round",
- "lgamma",
- "frac",
- "reciprocal",
- "sigmoid",
- "trunc",
- "zero",
- ):
- unary_foreach_func_name = f"_foreach_{unary_base_func_name}"
- if hasattr(torch, unary_foreach_func_name):
- add_docstr(
- getattr(torch, unary_foreach_func_name),
- r"""
- {}(self: List[Tensor]) -> List[Tensor]
- Apply :func:`torch.{}` to each Tensor of the input list.
- """.format(
- unary_foreach_func_name, unary_base_func_name
- ),
- )
- unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_"
- if hasattr(torch, unary_inplace_foreach_func_name):
- add_docstr(
- getattr(torch, unary_inplace_foreach_func_name),
- r"""
- {}(self: List[Tensor]) -> None
- Apply :func:`torch.{}` to each Tensor of the input list.
- """.format(
- unary_inplace_foreach_func_name, unary_base_func_name
- ),
- )
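- # For illustration, the loop above generates docstrings for usage like the
- # following sketch (assuming the ``_foreach_`` variants exist in this build):
- #
- # >>> out = torch._foreach_exp([torch.tensor([1.0]), torch.tensor([2.0])])
- # >>> out[0]
- # tensor([2.7183])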