43091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611 |
- # See README.md in this directory for more guidance
- # *********NB: _cast_* operators are DEPRECATED and will be removed
- # eventually. These were previously used before TorchScript IR supported
- # representing ScalarTypes. They are now superseded by usage of
- # `aten::to()`. The ops remain here for backward compatibility purposes.
- # DEPRECATED. DO NOT USE
- - func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
- # DEPRECATED. DO NOT USE
- - func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
- variants: function
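- # For illustration only (not part of the schema): the modern replacement is
- # `Tensor.to`, which also accepts `non_blocking`. A minimal Python sketch:
- #
- #   x = torch.randn(4)
- #   y = x.to(torch.float16, non_blocking=False)   # instead of _cast_Half(x)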
- # Computes the gradient of current tensor w.r.t. graph leaves.
- - func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
- manual_cpp_binding: True
- variants: method
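- # For illustration only: this is the internal entry point behind the public
- # `Tensor.backward()`. A minimal Python sketch:
- #
- #   x = torch.randn(3, requires_grad=True)
- #   loss = (x * x).sum()
- #   loss.backward()       # gradients accumulate into the leaf: x.grad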
- # DEPRECATED. Sets the tensor data held by this `Variable` to be the same as
- # `new_data`. It requires that `new_data` and `Variable` have compatible tensor
- # type, by checking `_has_compatible_shallow_copy_type(this, new_data)`.
- #
- # This function is deprecated because it doesn't really make sense in a world
- # where Variables *are* Tensors (as opposed to them containing tensors, which
- # is what the previous interpretation was).
- - func: set_data(Tensor(a!) self, Tensor new_data) -> ()
- manual_cpp_binding: True
- variants: method
- - func: data(Tensor self) -> Tensor
- manual_cpp_binding: True
- variants: method
- # True if this `Variable` is a leaf and thus does not have a `grad_fn`.
- - func: is_leaf(Tensor self) -> bool
- manual_cpp_binding: True
- variants: method
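- # For illustration only (`is_leaf` is exposed as a property in Python):
- #
- #   a = torch.rand(2, requires_grad=True)   # leaf: created by the user
- #   b = a + 1                               # non-leaf: produced by an op
- #   assert a.is_leaf and not b.is_leaf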
- # Returns the output index of this variable from the forward operation that
- # produced it. Conversely, it returns the input index of the gradient `Node` to
- # which this `Variable` is connected (because in the gradient computation,
- # inputs and outputs switch meaning). For example:
- #
- # y0, y1, y2 = f(x)
- # assert y0.output_nr == 0
- # assert y1.output_nr == 1
- # assert y2.output_nr == 2
- #
- - func: output_nr(Tensor self) -> int
- manual_cpp_binding: True
- variants: method
- - func: _version(Tensor self) -> int
- manual_cpp_binding: True
- variants: method
- - func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
- manual_cpp_binding: True
- variants: method
- # Enables .grad attribute for non-leaf Tensors.
- - func: retain_grad(Tensor(a!) self) -> ()
- manual_cpp_binding: True
- variants: method
- - func: retains_grad(Tensor self) -> bool
- manual_cpp_binding: True
- variants: method
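- # For illustration only: only leaves get `.grad` by default; `retain_grad`
- # opts a non-leaf in, and `retains_grad` reports that state. In Python:
- #
- #   x = torch.ones(3, requires_grad=True)
- #   y = x * 2                  # non-leaf
- #   y.retain_grad()
- #   y.sum().backward()
- #   assert y.retains_grad and y.grad is not None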
- - func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
- variants: method
- dispatch:
- CompositeExplicitAutograd: _fw_primal
- - func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
- variants: function
- dispatch:
- CompositeExplicitAutograd: _make_dual
- - func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
- variants: function
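- # For illustration only: these ops back the public forward-mode AD API. A
- # hedged sketch via `torch.autograd.forward_ad`:
- #
- #   import torch.autograd.forward_ad as fwAD
- #   with fwAD.dual_level():
- #       dual = fwAD.make_dual(torch.rand(2), torch.ones(2))   # ~ _make_dual
- #       primal, tangent = fwAD.unpack_dual(dual.sin())        # ~ _unpack_dual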
- # NOTE: [_new_zeros_with_same_feature_meta]
- # This function creates a new tensor with the layout and TensorOptions
- # of `other` but also takes into account the batch dimensions of `self`
- #
- # This function has a couple extra constraints because it is also used for `jvp`
- # in functorch.
- # - It is used for forward AD because there is the restriction
- # that the primal and tangent must have the same layout
- # - We cannot assume that `self` and `other` have the same sizes or even the same number of dims
- # because in the inplace over view case, `other` is the base tensor, and
- # `self` is the forward grad with respect to the view, which can have an
- # entirely different shape
- # - takes the number of batch dims for `self` because we also handle
- # some batching logic. We handle that here instead of a batching rule because
- # we'd like to avoid calling as_strided in the batching rule (so as to enable
- # nested vmap in functorch).
- # - needs to be CompositeExplicitAutograd for jvp support in functorch.
- # functorch currently relies on TensorWrapper, which does not have storage;
- # CompositeExplicitAutograd makes sure the TensorWrapper is unwrapped.
- # - this function may eventually take another int argument to store the
- # number of batch dims for `other` once we support that use case
- - func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutograd: _new_zeros_with_same_feature_meta
- autogen: _new_zeros_with_same_feature_meta.out
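- # For illustration only, a rough conceptual sketch (the real kernel also
- # reproduces strides/storage, which this ignores; assumes the usual private
- # binding):
- #
- #   out = torch._new_zeros_with_same_feature_meta(t, p, self_num_batch_dims=k)
- #   # roughly: out.shape == t.shape[:k] + p.shape, with p's TensorOptions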
- # This function compares the storage numel of self with that of other, where
- # storage numel is computed as: `other.storage().nbytes() / other.itemsize()`.
- # We create this function for composite compliance purposes. The batching rule
- # always returns true because vmapped as_strided does not support accessing
- # storage locations not indexable by the input tensor.
- # See the note above for more information.
- - func: _has_same_storage_numel(Tensor self, Tensor other) -> bool
- variants: function
- dispatch:
- CompositeExplicitAutograd: _has_same_storage_numel
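- # For illustration only: a strided view can index fewer elements than its
- # storage holds, and it is the storage-level count that is compared here
- # (shown with the newer `untyped_storage` accessor):
- #
- #   t = torch.arange(6.)
- #   v = t[::2]                       # view sharing t's 6-element storage
- #   v.untyped_storage().nbytes() // v.element_size()   # 6, but v.numel() == 3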
- - func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
- variants: method
- tags: inplace_view
- - func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
- variants: method
- - func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
- variants: method
- - func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
- variants: method
- - func: align_as(Tensor self, Tensor other) -> Tensor
- variants: method
- - func: align_tensors(Tensor[] tensors) -> Tensor[]
- # Not assert because it's a keyword; not Assert because FX already
- # took that syntax
- # TODO: need to specify this is side-effectful somehow
- - func: _assert_async(Tensor self) -> ()
- dispatch:
- CPU: _assert_async_cpu
- CUDA: _assert_async_cuda
- - func: _assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
- - func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
- variants: method
- - func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
- device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
- dispatch:
- CUDA: _use_cudnn_ctc_loss
- - func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
- device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
- dispatch:
- CUDA: _use_cudnn_ctc_loss_tensor
- - func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
- device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
- dispatch:
- CUDA: _cudnn_ctc_loss
- autogen: _cudnn_ctc_loss.out
- - func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
- device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
- dispatch:
- CUDA: _cudnn_ctc_loss_tensor
- - func: _use_cudnn_rnn_flatten_weight() -> bool
- - func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
- dispatch:
- CUDA: _cudnn_rnn_flatten_weight
- autogen: _cudnn_rnn_flatten_weight.out
- - func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- # rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
- # Disable dispatch-time device check for consistent behavior.
- device_check: NoCheck
- dispatch:
- CUDA: _cudnn_rnn
- autogen: _cudnn_rnn.out
- - func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
- dispatch:
- CUDA: _cudnn_rnn_backward
- autogen: _cudnn_rnn_backward.out
- - func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- dispatch:
- CUDA: _cudnn_init_dropout_state
- autogen: _cudnn_init_dropout_state.out
- - func: _debug_has_internal_overlap(Tensor self) -> int
- variants: function
- - func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
- variants: function
- dispatch:
- CUDA: fused_dropout_cuda
- tags: nondeterministic_seeded
- autogen: _fused_dropout.out
- - func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
- variants: function
- dispatch:
- CUDA: masked_scale_cuda
- autogen: _masked_scale.out
- - func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
- variants: function
- dispatch:
- CPU: native_dropout_cpu
- CUDA: native_dropout_cuda
- NestedTensorCPU, NestedTensorCUDA: native_dropout_nested
- tags: [nondeterministic_seeded, core]
- autogen: native_dropout.out
- - func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
- dispatch:
- CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward
- CUDA: native_dropout_backward_cuda
- autogen: native_dropout_backward.out
- tags: pointwise
- - func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
- - func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
- - func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
- - func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
- - func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
- - func: _shape_as_tensor(Tensor self) -> Tensor
- - func: dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
- - func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
- tags: nondeterministic_seeded
- - func: feature_dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
- - func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
- tags: nondeterministic_seeded
- - func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
- - func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
- tags: nondeterministic_seeded
- - func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
- - func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
- tags: nondeterministic_seeded
- - func: abs(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: abs
- SparseCPU, SparseCUDA: abs_sparse
- SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr
- tags: [core, pointwise]
- - func: abs_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: abs_
- SparseCPU, SparseCUDA: abs_sparse_
- SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_
- - func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: abs_out
- MPS: abs_out_mps
- SparseCPU, SparseCUDA: abs_sparse_out
- SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out
- tags: pointwise
- # Note [Adding an alias]
- # To add an alias do the following:
- #
- # 1) Copy the original function's native_functions.yaml entry, but replace the
- # original function's name with the alias's and delete any dispatch
- # keys for the aliases. Specifying a dispatch key will prevent
- # autograd from recording the operations the alias performs, which
- # will stop it from "inheriting" the original operation's autograd behavior.
- # 2) Implement the corresponding functions and have them redispatch to the
- # original function.
- # 3) Add docstrings to the new function that reference the original function,
- # and document the method as usual (if it exists).
- # (See torch/_torch_docs.py and docs/source/torch.rst if adding a function,
- # torch/_tensor_docs.py and docs/source/tensors.rst if adding a method,
- # or module-specific doc bindings (like torch/linalg/__init__.py) if
- # adding an alias in a namespace.)
- # 4) Update torch/overrides.py consistent with the original function.
- # 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
- # 6) Add an aliases argument to the existing OpInfo/UnaryUfuncInfo, or create a new
- # OpInfo/UnaryUfuncInfo entry in the op_db list in torch/testing/_internal/common_methods_invocations.py
- #
- # See torch.absolute, an alias for torch.abs, as an example.
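- # For illustration only: because the alias redispatches, it matches the
- # original numerically and inherits its autograd behavior:
- #
- #   x = torch.tensor([-1.0, 2.0], requires_grad=True)
- #   assert torch.equal(torch.absolute(x), torch.abs(x))
- #   torch.absolute(x).sum().backward()   # gradients flow exactly as for abs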
- # Absolute, alias for abs
- - func: absolute(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: absolute_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- - func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- - func: angle(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CPU, CUDA: angle
- SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr
- tags: pointwise
- - func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: angle_out
- SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr_out
- tags: pointwise
- - func: view_as_real(Tensor(a) self) -> Tensor(a)
- variants: function
- dispatch:
- CPU, CUDA, MPS, Meta: view_as_real
- - func: view_as_complex(Tensor(a) self) -> Tensor(a)
- variants: function
- dispatch:
- CPU, CUDA, Meta: view_as_complex
- - func: sgn(Tensor self) -> Tensor
- variants: function, method
- structured_delegate: sgn.out
- dispatch:
- SparseCPU, SparseCUDA: sgn_sparse
- SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr
- tags: pointwise
- - func: sgn_(Tensor(a!) self) -> Tensor(a!)
- variants: method
- structured_delegate: sgn.out
- dispatch:
- SparseCPU, SparseCUDA: sgn_sparse_
- SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_
- tags: pointwise
- - func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: sgn_out
- SparseCPU, SparseCUDA: sgn_sparse_out
- SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_out
- tags: pointwise
- - func: chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
- variants: method
- - func: real(Tensor(a) self) -> Tensor(a)
- device_check: NoCheck # TensorIterator
- variants: function
- - func: imag(Tensor(a) self) -> Tensor(a)
- device_check: NoCheck # TensorIterator
- variants: function
- - func: _conj(Tensor(a) self) -> Tensor(a)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: _conj
- - func: conj(Tensor(a) self) -> Tensor(a)
- variants: function, method
- manual_cpp_binding: True
- - func: _conj_physical(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: _conj_physical
- SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr
- autogen: _conj_physical.out
- - func: conj_physical(Tensor self) -> Tensor
- variants: function, method
- tags: pointwise
- - func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, CUDA: conj_physical_out
- SparseCPU, SparseCUDA: conj_physical_out_sparse
- SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_out
- tags: pointwise
- - func: conj_physical_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: conj_physical_
- SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_
- tags: pointwise
- - func: resolve_conj(Tensor(a) self) -> Tensor(a)
- variants: function, method
- - func: resolve_neg(Tensor(a) self) -> Tensor(a)
- variants: function, method
- - func: _neg_view(Tensor(a) self) -> Tensor(a)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: _neg_view
- - func: acos(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: acos.out
- tags: [core, pointwise]
- - func: acos_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: acos.out
- tags: pointwise
- - func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: acos_out
- MPS: acos_out_mps
- tags: pointwise
- # arccos, alias of acos
- - func: arccos(Tensor self) -> Tensor
- variants: function, method
- - func: arccos_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- - func: arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- - func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
- - func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
- # Return: (Tensor output, Tensor indices)
- - func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
- - func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: add.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: add_sparse
- SparseCsrCPU, SparseCsrCUDA: add_sparse_csr
- MkldnnCPU: mkldnn_add
- ZeroTensor: add_zerotensor
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_add_Tensor
- tags: [core, pointwise]
- - func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- structured_delegate: add.out
- dispatch:
- SparseCPU, SparseCUDA: add_sparse_
- SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_
- MkldnnCPU: mkldnn_add_
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_add__Tensor
- tags: pointwise
- - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- ufunc_inner_loop:
- Generic: add (AllAndComplex, BFloat16, Half, ComplexHalf)
- ScalarOnly: add (Bool)
- dispatch:
- SparseCPU: add_out_sparse_cpu
- SparseCUDA: add_out_sparse_cuda
- SparseCsrCPU: add_out_sparse_csr_cpu
- SparseCsrCUDA: add_out_sparse_csr_cuda
- MkldnnCPU: mkldnn_add_out
- MPS: add_out_mps
- tags: pointwise
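- # For illustration only: `alpha` scales `other` before the addition:
- #
- #   a, b = torch.ones(2), torch.full((2,), 3.0)
- #   torch.add(a, b, alpha=2)   # a + 2 * b -> tensor([7., 7.])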
- - func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- variants: function
- dispatch:
- CPU: add_relu
- - func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
- variants: function
- dispatch:
- CPU: add_relu_
- - func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CPU: add_relu_out
- - func: _add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- variants: function
- dispatch:
- CPU: add_relu
- - func: _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
- variants: function
- dispatch:
- CPU: add_relu_
- autogen: _add_relu.Scalar_out
- # For C++ only, until we have conversion from C++ numbers to Tensor
- - func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: add
- tags: [core, pointwise]
- - func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: add_
- autogen: add.Scalar_out
- tags: pointwise
- - func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- structured_delegate: addmv.out
- variants: function, method
- - func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
- structured_delegate: addmv.out
- variants: function, method
- - func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU: addmv_out_cpu
- CUDA: addmv_out_cuda
- MPS: addmv_out_mps
- SparseCsrCPU: addmv_out_sparse_compressed
- SparseCsrCUDA: addmv_out_sparse_compressed_cuda
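- # For illustration only: addmv computes `beta * self + alpha * (mat @ vec)`:
- #
- #   bias, mat, vec = torch.zeros(2), torch.eye(2), torch.tensor([3., 4.])
- #   torch.addmv(bias, mat, vec)   # tensor([3., 4.])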
- - func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- variants: function, method
- dispatch:
- CPU, CUDA: addr
- MPS: addr_mps
- CompositeExplicitAutograd: math_addr
- - func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
- variants: method
- dispatch:
- CompositeExplicitAutograd: addr_
- - func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, CUDA: addr_out
- MPS: addr_out_mps
- CompositeExplicitAutograd: math_addr_out
- - func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutograd: affine_grid_generator
- autogen: affine_grid_generator.out
- - func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
- variants: function
- - func: _is_all_true(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: _is_all_true
- - func: _is_any_true(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: _is_any_true
- # Note: this function is only for testing.
- - func: _test_check_tensor(Tensor self) -> Tensor
- variants: function
- - func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: all.out
- variants: function, method
- - func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- precomputed:
- - dim -> int dim
- dispatch:
- CPU, CUDA: all_out
- MPS: all_out_mps
- - func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- - func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
- variants: function, method
- tags: data_dependent_output
- dispatch:
- CompositeExplicitAutograd: allclose
- - func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: any.out
- variants: function, method
- - func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- precomputed:
- - dim -> int dim
- dispatch:
- CPU, CUDA: any_out
- MPS: any_out_mps
- - func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- - func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: arange
- - func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: arange
- # This operator should be named `arange.start_out` if following the naming convention. However that
- # name is already taken. Disabled because of CI job failures.
- # FIXME: enable this
- #- func: arange.start_out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
- # dispatch:
- # CompositeExplicitAutograd: arange_start_out
- - func: arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: arange
- cpp_no_default_args: ['step']
- tags: core
- - func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: arange_out
- - func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, Meta: arange_out
- CUDA: arange_cuda_out
- MPS: arange_mps_out
- cpp_no_default_args: ['step']
- # This function is a temporary hack to allow tracing of arange-like constructs with dynamic
- # bounds on arange. Normal arange is not traceable because it does not take any tensor inputs;
- # if the range you need is based on another tensor, calling this function directly will
- # preserve tracing. Get rid of this when arange can directly take tensors for bounds
- # (so that it can be traced directly).
- - func: _dim_arange(Tensor like, int dim) -> Tensor
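- # For illustration only, assuming the usual private binding:
- #
- #   like = torch.zeros(4, 7)
- #   torch._dim_arange(like, 1)   # ~ torch.arange(like.size(1)), but traceable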
- - func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
- structured_delegate: argmax.out
- device_check: NoCheck # TensorIterator
- variants: function, method
- tags: core
- - func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU, CUDA: argmax_out
- MPS: argmax_out_mps
- - func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
- structured_delegate: argmin.out
- device_check: NoCheck # TensorIterator
- variants: function, method
- tags: core
- - func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU, CUDA: argmin_out
- MPS: argmin_out_mps
- - func: acosh(Tensor self) -> Tensor
- variants: function, method
- structured_delegate: acosh.out
- tags: [core, pointwise]
- - func: acosh_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- structured_delegate: acosh.out
- tags: pointwise
- - func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: acosh_out
- MPS: acosh_out_mps
- tags: pointwise
- # arccosh, alias for acosh
- - func: arccosh(Tensor self) -> Tensor
- variants: function, method
- - func: arccosh_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- - func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- - func: asinh(Tensor self) -> Tensor
- variants: function, method
- structured_delegate: asinh.out
- dispatch:
- SparseCPU, SparseCUDA: asinh_sparse
- SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr
- tags: [core, pointwise]
- - func: asinh_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- structured_delegate: asinh.out
- dispatch:
- SparseCPU, SparseCUDA: asinh_sparse_
- SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_
- tags: pointwise
- - func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: asinh_out
- MPS: asinh_out_mps
- SparseCPU, SparseCUDA: asinh_sparse_out
- SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_out
- tags: pointwise
- # arcsinh, alias for asinh
- - func: arcsinh(Tensor self) -> Tensor
- variants: function, method
- - func: arcsinh_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- - func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- - func: atanh(Tensor self) -> Tensor
- structured_delegate: atanh.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: atanh_sparse
- SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr
- tags: [core, pointwise]
- - func: atanh_(Tensor(a!) self) -> Tensor(a!)
- structured_delegate: atanh.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: atanh_sparse_
- SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_
- tags: pointwise
- - func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: atanh_out
- MPS: atanh_out_mps
- SparseCPU, SparseCUDA: atanh_sparse_out
- SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_out
- tags: pointwise
- # arctanh, alias for atanh
- - func: arctanh(Tensor self) -> Tensor
- variants: function, method
- - func: arctanh_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- - func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- - func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
- variants: function, method
- dispatch:
- ZeroTensor, CPU, CUDA: as_strided_tensorimpl
- Meta: as_strided_tensorimpl_meta_symint
- MPS: as_strided_tensorimpl_mps
- QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl
- device_check: NoCheck
- device_guard: False
- tags: core
- - func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
- use_const_ref_for_mutable_tensors: True
- variants: function, method
- device_check: NoCheck
- device_guard: False
- tags: inplace_view
- dispatch:
- CompositeExplicitAutogradNonFunctional: as_strided__symint
- - func: asin(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: asin.out
- dispatch:
- SparseCPU, SparseCUDA: asin_sparse
- SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr
- tags: [core, pointwise]
- - func: asin_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: asin.out
- dispatch:
- SparseCPU, SparseCUDA: asin_sparse_
- SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_
- tags: pointwise
- - func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: asin_out
- MPS: asin_out_mps
- SparseCPU, SparseCUDA: asin_sparse_out
- SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_out
- tags: pointwise
- # arcsin, alias of asin
- - func: arcsin(Tensor self) -> Tensor
- variants: function, method
- - func: arcsin_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- - func: arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- - func: atan(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: atan.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: atan_sparse
- SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr
- tags: [core, pointwise]
- - func: atan_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: atan.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: atan_sparse_
- SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_
- tags: pointwise
- - func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: atan_out
- MPS: atan_out_mps
- SparseCPU, SparseCUDA: atan_sparse_out
- SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_out
- tags: pointwise
- # arctan, alias of atan
- - func: arctan(Tensor self) -> Tensor
- variants: function, method
- - func: arctan_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- - func: arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- - func: atleast_1d(Tensor self) -> Tensor
- variants: function
- - func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
- - func: atleast_2d(Tensor self) -> Tensor
- variants: function
- - func: atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
- variants: function
- - func: atleast_3d(Tensor self) -> Tensor
- variants: function
- - func: atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
- variants: function
- - func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- variants: function, method
- structured_delegate: baddbmm.out
- - func: baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
- variants: method
- structured_delegate: baddbmm.out
- - func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- structured: True
- variants: function
- dispatch:
- CPU: baddbmm_out_cpu
- CUDA: baddbmm_out_cuda
- MPS: baddbmm_out_mps
- SparseCsrCUDA: baddbmm_out_sparse_csr_cuda
- - func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: bartlett_window
- autogen: bartlett_window.out
- - func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: bartlett_window
- autogen: bartlett_window.periodic_out
- - func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
- - func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
- dispatch:
- QuantizedCPU: quantized_batch_norm
- autogen: quantized_batch_norm.out
- - func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
- - func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
- # Sample from Bernoulli with the values in `self` as probabilities.
- - func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: bernoulli
- tags: nondeterministic_seeded
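- # For illustration only: each element of `self` is a probability in [0, 1]:
- #
- #   probs = torch.tensor([0.0, 0.5, 1.0])
- #   torch.bernoulli(probs)   # first element is always 0, last always 1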
- - func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function
- tags: nondeterministic_seeded
- dispatch:
- CPU, CUDA: bernoulli_out
- MPS: bernoulli_out_mps
- - func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- tags: nondeterministic_seeded
- dispatch:
- CPU, CUDA: bernoulli_
- MPS: bernoulli_mps_
- autogen: bernoulli.Tensor, bernoulli.Tensor_out
- - func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- tags: nondeterministic_seeded
- dispatch:
- CPU, CUDA: bernoulli_
- MPS: bernoulli_mps_
- autogen: bernoulli.float_out
- # Note [bernoulli.p schema]
- # We should probably just fix the overload ambiguity by appending a _functional to the C++ API name (BC breaking)
- # This out-of-place version isn't used explicitly, but is needed by jit.
- # There is no default value on `p` here because it would introduce ambiguity
- # with `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- - func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutogradNonFunctional: bernoulli
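- # For illustration only: with the `p` overload the scalar is the draw
- # probability and `self` supplies only shape/dtype; the in-place float
- # variant above is the commonly used spelling:
- #
- #   x = torch.empty(2, 3)
- #   x.bernoulli_(0.25)   # each element is 1 with probability 0.25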
- - func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
- - func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
- device_check: NoCheck # TensorIterator
- python_module: nn
- variants: function
- dispatch:
- CPU: binary_cross_entropy_cpu
- CUDA: binary_cross_entropy_cuda
- MPS: binary_cross_entropy_mps
- - func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- python_module: nn
- variants: function
- dispatch:
- CPU: binary_cross_entropy_out_cpu
- CUDA: binary_cross_entropy_out_cuda
- MPS: binary_cross_entropy_out_mps
- - func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
- python_module: nn
- variants: function
- dispatch:
- CPU: binary_cross_entropy_backward_cpu
- CUDA: binary_cross_entropy_backward_cuda
- MPS: binary_cross_entropy_backward_mps
- - func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
- python_module: nn
- variants: function
- dispatch:
- CPU: binary_cross_entropy_backward_out_cpu
- CUDA: binary_cross_entropy_backward_out_cuda
- MPS: binary_cross_entropy_backward_out_mps
- - func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CompositeExplicitAutograd: binary_cross_entropy_with_logits
- autogen: binary_cross_entropy_with_logits.out
- - func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
- variants: function, method
- dispatch:
- CPU: _bincount_cpu
- CUDA: _bincount_cuda
- MPS: _bincount_mps
- tags: dynamic_output_shape
- autogen: bincount.out
- - func: bitwise_not(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: bitwise_not.out
- variants: function, method
- tags: [core, pointwise]
- - func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: bitwise_not.out
- variants: method
- tags: pointwise
- - func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: bitwise_not_out
- tags: pointwise
- - func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: copysign_out
- tags: pointwise
- - func: copysign.Tensor(Tensor self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: copysign.out
- tags: pointwise
- - func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- structured_delegate: copysign.out
- - func: copysign.Scalar(Tensor self, Scalar other) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: copysign
- tags: pointwise
- - func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- variants: method
- dispatch:
- CompositeExplicitAutograd: copysign_
- - func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: copysign_out
- tags: pointwise
- - func: logical_not(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: logical_not
- tags: [core, pointwise]
- - func: logical_not_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: logical_not_
- tags: pointwise
- - func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: logical_not_out
- MPS: logical_not_out_mps
- tags: pointwise
- - func: logical_xor(Tensor self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: logical_xor
- tags: pointwise
- - func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: logical_xor_
- tags: pointwise
- - func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: logical_xor_out
- MPS: logical_xor_out_mps
- tags: pointwise
- - func: logical_and(Tensor self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: logical_and
- tags: [core, pointwise]
- - func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: logical_and_
- tags: pointwise
- - func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: logical_and_out
- MPS: logical_and_out_mps
- tags: pointwise
- - func: logical_or(Tensor self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: logical_or
- tags: [core, pointwise]
- - func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: logical_or_
- tags: pointwise
- - func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: logical_or_out
- MPS: logical_or_out_mps
- tags: pointwise
- - func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: blackman_window
- autogen: blackman_window.out
- - func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: blackman_window
- autogen: blackman_window.periodic_out
- - func: bmm(Tensor self, Tensor mat2) -> Tensor
- structured_delegate: bmm.out
- variants: function, method
- dispatch:
- SparseCPU: bmm_sparse_cpu
- SparseCUDA: bmm_sparse_cuda
- NestedTensorCPU: bmm_nested
- NestedTensorCUDA: bmm_nested_cuda
- tags: core
- - func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- variants: function
- dispatch:
- CPU: bmm_out_cpu
- CUDA: bmm_out_cuda
- MPS: bmm_out_mps
- SparseCPU: bmm_out_sparse_cpu
- SparseCUDA: bmm_out_sparse_cuda
- SparseCsrCUDA: bmm_out_sparse_csr_cuda
- - func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
- device_check: NoCheck
- device_guard: False
- - func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
- variants: function, method
- dispatch:
- CompositeImplicitAutograd: broadcast_to_symint
- - func: _sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
- variants: function
- dispatch:
- SparseCPU, SparseCUDA: sparse_broadcast_to
- - func: cat(Tensor[] tensors, int dim=0) -> Tensor
- structured_delegate: cat.out
- dispatch:
- SparseCPU, SparseCUDA: cat_sparse
- QuantizedCPU: cat_quantized_cpu
- tags: core
- - func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- precomputed:
- - dim -> int dim, int valid, bool all_contiguous, bool all_same_dtype, bool all_same_sizes_and_stride, MemoryFormat memory_format
- dispatch:
- CPU: cat_out_cpu
- CUDA: cat_out_cuda
- MPS: cat_out_mps
- QuantizedCPU: cat_out_quantized_cpu
- - func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor
- - func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- # alias for torch.cat
- - func: concat(Tensor[] tensors, int dim=0) -> Tensor
- - func: concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- - func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor
- - func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- # alias for torch.cat
- - func: concatenate(Tensor[] tensors, int dim=0) -> Tensor
- - func: concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- - func: concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
- - func: concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- - func: block_diag(Tensor[] tensors) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutograd: block_diag
- autogen: block_diag.out
- - func: ceil(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: ceil.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: ceil_sparse
- SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr
- tags: pointwise
- - func: ceil_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: ceil.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: ceil_sparse_
- SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_
- tags: pointwise
- - func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: ceil_out
- MPS: ceil_out_mps
- SparseCPU, SparseCUDA: ceil_sparse_out
- SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_out
- tags: pointwise
- # alias for torch.linalg.multi_dot
- - func: chain_matmul(Tensor[] matrices) -> Tensor
- variants: function
- # alias for torch.linalg.multi_dot
- - func: chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
- - func: unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
- variants: function, method
- device_check: NoCheck
- device_guard: False
- - func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
- variants: function, method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeImplicitAutograd: chunk
- NestedTensorCPU, NestedTensorCUDA: chunk_nested_tensor
- - func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
- variants: function, method
- dispatch:
- CompositeImplicitAutograd: tensor_split_sections_symint
- - func: tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
- variants: function, method
- dispatch:
- CompositeImplicitAutograd: tensor_split_indices_symint
- - func: tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
- variants: function, method
- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ['min']
  structured_delegate: clamp.out
  dispatch:
    QuantizedCPU: clamp_quantized_cpu
  tags: [core, pointwise]

- func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
  variants: function, method
  structured_delegate: clamp.Tensor_out
  tags: pointwise

- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ['min']
  structured_delegate: clamp.out
  tags: pointwise

- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
  variants: function, method
  structured_delegate: clamp.Tensor_out
  tags: pointwise

- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ['min']
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_out
    MPS: clamp_out_mps
  tags: pointwise

- func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_Tensor_out
    MPS: clamp_Tensor_out_mps
  tags: pointwise

- func: clamp_max(Tensor self, Scalar max) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_max.out
  tags: pointwise

- func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
  variants: function, method
  structured_delegate: clamp_max.Tensor_out
  tags: pointwise

- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_max.out
  tags: pointwise

- func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
  variants: function, method
  structured_delegate: clamp_max.Tensor_out
  tags: pointwise

- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_max_out
    MPS: clamp_max_out_mps
  tags: pointwise

- func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_max_Tensor_out
    MPS: clamp_max_Tensor_out_mps
  tags: pointwise

- func: clamp_min(Tensor self, Scalar min) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_min.out
  tags: pointwise

- func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
  variants: function, method
  structured_delegate: clamp_min.Tensor_out
  tags: pointwise

- func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_min.out
  tags: pointwise

- func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
  variants: function, method
  structured_delegate: clamp_min.Tensor_out
  tags: pointwise

- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_min_out
    MPS: clamp_min_out_mps
  tags: pointwise

- func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_min_Tensor_out
    MPS: clamp_min_Tensor_out_mps
  tags: pointwise

# clip is an alias for clamp
- func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
  cpp_no_default_args: ['min']
  variants: function, method
  tags: pointwise

- func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
  variants: function, method
  tags: pointwise

- func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
  cpp_no_default_args: ['min']
  variants: function, method
  tags: pointwise

- func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
  variants: function, method
  tags: pointwise

- func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
  cpp_no_default_args: ['min']
  tags: pointwise

- func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
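
# Illustrative Python for the clamp/clip overloads above (a sketch kept as
# comments, not part of the operator schema): the Scalar overloads take
# numbers, the Tensor overloads take broadcastable bound tensors, and clip is
# an alias for clamp.
#   import torch
#   x = torch.tensor([-2.0, 0.5, 3.0])
#   x.clamp(min=0.0, max=1.0)        # Scalar bounds -> tensor([0.0, 0.5, 1.0])
#   x.clamp(min=torch.zeros(3))      # Tensor bound, broadcastable
#   x.clip(0.0, 1.0)                 # same result via the alias
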
- func: cudnn_is_acceptable(Tensor self) -> bool
  device_check: NoCheck
  device_guard: False

- func: complex(Tensor real, Tensor imag) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: complex

- func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: complex_out

- func: polar(Tensor abs, Tensor angle) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: polar

- func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: polar_out
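
# Illustrative Python for the two complex factories above (a sketch kept as
# comments, not part of the operator schema): complex() combines Cartesian
# parts, polar() combines magnitude and phase.
#   import torch
#   real = torch.tensor([1.0, 0.0])
#   imag = torch.tensor([0.0, 1.0])
#   z1 = torch.complex(real, imag)                            # 1+0j, 0+1j
#   z2 = torch.polar(torch.ones(2), torch.tensor([0.0, 1.5]))  # abs, angle
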
- func: constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: constant_pad_nd
    MPS: constant_pad_nd_mps
  autogen: constant_pad_nd.out
  tags: core

- func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
  variants: method
  manual_cpp_binding: True

- func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
  dispatch:
    CompositeExplicitAutograd: convolution
  autogen: convolution.out
  tags: core

- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CompositeExplicitAutograd, CUDA: convolution_backward
  autogen: convolution_backward.out
  tags: core

- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
  dispatch:
    CompositeExplicitAutograd: convolution_overrideable
  autogen: convolution_overrideable.out

- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  dispatch:
    CompositeExplicitAutograd: convolution_backward_overrideable
  autogen: convolution_backward_overrideable.out

- func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _convolution
  autogen: _convolution.out

- func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor

- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor

- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

- func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor

- func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor

- func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor

- func: conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor
  cpp_no_default_args: ['bias', 'stride', 'padding']

- func: conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
  cpp_no_default_args: ['bias', 'stride', 'padding']

- func: conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
  cpp_no_default_args: ['bias', 'stride', 'padding']

- func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
  dispatch:
    CompositeExplicitAutograd: conv_tbc
  autogen: conv_tbc.out

- func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)

# NB: we inherit the goofy argument order from PyTorch torch.nn.functional
- func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor

- func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor

- func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
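
# Illustrative Python for the transposed-convolution signatures above (a
# sketch kept as comments, not part of the operator schema). Note the
# inherited argument order: groups comes before dilation, unlike conv1d/2d/3d.
#   import torch
#   x = torch.randn(1, 4, 8)   # (batch, in_channels, length)
#   w = torch.randn(4, 2, 3)   # (in_channels, out_channels // groups, kW)
#   y = torch.conv_transpose1d(x, w, None, 1, 0, 0, 1, 1)
#   # (input, weight, bias, stride, padding, output_padding, groups, dilation)
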
- func: copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: copy

- func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: copy_mkldnn_
    SparseCPU, SparseCUDA: copy_sparse_wrapper_
    CompositeExplicitAutograd: copy_
    SparseCsrCPU, SparseCsrCUDA: copy_sparse_compressed_
    NestedTensorCPU, NestedTensorCUDA: copy_nested_
  autogen: copy.out

- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
  dispatch:
    MPS: _copy_from_mps
  autogen: _copy_from.out

# We need this to be able to properly copy from a CPU to an XLA tensor with different sizes.
# See https://github.com/pytorch/xla/issues/2881
- func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
  dispatch:
    MPS: _copy_from_and_resize_mps
  autogen: _copy_from_and_resize.out

- func: cos(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cos.out
  tags: [core, pointwise]

- func: cos_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cos.out
  tags: pointwise

- func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: cos_out
    MPS: cos_out_mps
  tags: pointwise

- func: cosh(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cosh.out
  tags: [core, pointwise]

- func: cosh_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cosh.out
  tags: pointwise

- func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: cosh_out
    MPS: cosh_out_mps
  tags: pointwise

- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor

- func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
  variants: function, method
  dispatch:
    CPU: count_nonzero_cpu
    CUDA: count_nonzero_cuda
    MPS: count_nonzero_mps
  autogen: count_nonzero.dim_IntList_out

- func: count_nonzero(Tensor self, int? dim=None) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: count_nonzero
  autogen: count_nonzero.out

- func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
  variants: function, method

- func: corrcoef(Tensor self) -> Tensor
  variants: function, method

- func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
  dispatch:
    CUDA: cudnn_affine_grid_generator_forward
  autogen: cudnn_affine_grid_generator.out

# TODO: Why do I have to call this grad?!
- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
  dispatch:
    CUDA: cudnn_affine_grid_generator_backward
  autogen: cudnn_affine_grid_generator_backward.out

- func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CUDA: cudnn_batch_norm
  autogen: cudnn_batch_norm.out

# NB: You can only use this if you used cudnn_batch_norm with training=True
- func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: cudnn_batch_norm_backward
  autogen: cudnn_batch_norm_backward.out

- func: cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
  dispatch:
    CUDA: cudnn_convolution
  autogen: cudnn_convolution.out

- func: cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_transpose
  autogen: cudnn_convolution_transpose.out

- func: _mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
  dispatch:
    MPS: _mps_convolution_transpose
  autogen: _mps_convolution_transpose.out

- func: mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
  dispatch:
    MPS: mps_convolution_transpose_backward
  autogen: mps_convolution_transpose_backward.out

- func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_relu
  autogen: cudnn_convolution_relu.out

- func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_add_relu
  autogen: cudnn_convolution_add_relu.out

# NB: input is special cased in a way I don't quite understand
- func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
  dispatch:
    CUDA: cudnn_grid_sampler_forward
  autogen: cudnn_grid_sampler.out

- func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
  dispatch:
    CUDA: cudnn_grid_sampler_backward
  autogen: cudnn_grid_sampler_backward.out

- func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: cummax

- func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: cummax_out

- func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator

- func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
  variants: function
  dispatch:
    CPU: cummax_helper_cpu
    CUDA: cummax_helper_cuda

- func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: cummin

- func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: cummin_out

- func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator

- func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
  variants: function
  dispatch:
    CPU: cummin_helper_cpu
    CUDA: cummin_helper_cuda

- func: cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: cumprod.out
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
  structured_delegate: cumprod.out
  variants: method

- func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: cumprod_out

- func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
  variants: method

- func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: cumsum.out
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
  structured_delegate: cumsum.out
  variants: method

- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: cumsum_out
    MPS: cumsum_out_mps

- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
  variants: method

- func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor

- func: cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor

- func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor

# convenience function that converts to intlists for you
- func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor

- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
  dispatch:
    CPU: ctc_loss_cpu
    CUDA: ctc_loss_gpu
  autogen: _ctc_loss.out
  tags: dynamic_output_shape # the shape of second output is data dependent

- func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
  dispatch:
    CPU, CUDA: ctc_loss_tensor
  autogen: _ctc_loss.Tensor_out
  tags: dynamic_output_shape # the shape of second output is data dependent

- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
  dispatch:
    CPU: ctc_loss_backward_cpu
    CUDA: ctc_loss_backward_gpu
  autogen: _ctc_loss_backward.out

- func: _ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
  dispatch:
    CPU, CUDA: ctc_loss_backward_tensor

- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutogradNonFunctional: diag_embed
  autogen: diag_embed.out

- func: diagflat(Tensor self, int offset=0) -> Tensor
  variants: function, method

- func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: diagonal

- func: linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
  python_module: linalg
  variants: function

- func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
  variants: function, method

- func: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: diagonal_backward_symint
  autogen: diagonal_backward.out

- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
  variants: method

- func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
  variants: function, method

- func: diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
  variants: function

- func: gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: div.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: div.out
  dispatch:
    SparseCPU, SparseCUDA: div_sparse
    ZeroTensor: div_zerotensor
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Tensor
  tags: [core, pointwise]

- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: div.out
  dispatch:
    SparseCPU, SparseCUDA: div_sparse_
  tags: pointwise

- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: div_out
    MPS: div_out_mps
    SparseCPU, SparseCUDA: div_out_sparse_zerodim
  tags: pointwise

- func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: div.out_mode
  dispatch:
    SparseCPU, SparseCUDA: div_sparse
  tags: pointwise

- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: div.out_mode
  dispatch:
    SparseCPU, SparseCUDA: div_sparse_
  tags: pointwise

- func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: div_out_mode
    MPS: div_out_mode_mps
    SparseCPU, SparseCUDA: div_out_sparse_zerodim
  tags: pointwise

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: div
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Scalar
  tags: [core, pointwise]

- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: div_
  autogen: div.Scalar_out
  tags: pointwise

- func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: div
  tags: pointwise

- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
  variants: method
  dispatch:
    CompositeExplicitAutograd: div_
  autogen: div.Scalar_mode_out
  tags: pointwise
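
# Illustrative Python for the rounding_mode overloads above (a sketch kept as
# comments, not part of the operator schema): rounding_mode=None gives true
# division, 'trunc' rounds toward zero, 'floor' toward negative infinity.
#   import torch
#   a, b = torch.tensor([-7.0]), torch.tensor([2.0])
#   torch.div(a, b)                         # tensor([-3.5000])
#   torch.div(a, b, rounding_mode='trunc')  # tensor([-3.])
#   torch.div(a, b, rounding_mode='floor')  # tensor([-4.])
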
# divide, alias for div
- func: divide.Tensor(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: divide.Scalar(Tensor self, Scalar other) -> Tensor
  variants: function, method

- func: divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
  variants: function, method

- func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
  variants: method

- func: divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)

- func: divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
  variants: function, method

- func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
  variants: method

# true_divide, an alias for div
- func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: pointwise

- func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: dot(Tensor self, Tensor tensor) -> Tensor
  variants: function, method
  dispatch:
    CPU: dot
    CUDA: dot_cuda
    MPS: dot_mps

- func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: dot_out

- func: vdot(Tensor self, Tensor other) -> Tensor
  variants: function, method
  dispatch:
    CPU: vdot
    CUDA: vdot_cuda

- func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: vdot_out

- func: einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor

- func: embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
  dispatch:
    CompositeExplicitAutograd: embedding_symint
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_embedding
  autogen: embedding.out

- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
  dispatch:
    CompositeImplicitAutograd: embedding_backward_symint

- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
  dispatch:
    CPU: embedding_dense_backward_cpu
    CUDA: embedding_dense_backward_cuda
    MPS: embedding_dense_backward_mps
  autogen: embedding_dense_backward.out
  tags: core

- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
  dispatch:
    CPU: embedding_renorm_cpu_
    CUDA: embedding_renorm_cuda_
  autogen: embedding_renorm, embedding_renorm.out

- func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor

# NOTE [ embedding_bag Native Functions ]
# The `_embedding_bag.*` variants assume that input tensors except for `weight`,
# e.g. `indices` and `offsets` (and `offset2bag`), are contiguous.
# We really only need to enforce this for `_embedding_bag` (the forward) because
# the backward inputs are the same as forward ones.
# The `embedding_bag` wrapper below is created to achieve this, e.g., by
# applying indices = indices.contiguous().
# The backward functions apply a check that these input tensors are contiguous.
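
# Illustrative Python for the note above (a sketch kept as comments, not part
# of the operator schema): the public functional wrapper may be called with
# non-contiguous indices/offsets, while the `_embedding_bag.*` entries below
# assume contiguous inputs.
#   import torch
#   import torch.nn.functional as F
#   weight = torch.randn(10, 3)
#   indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
#   offsets = torch.tensor([0, 4])               # two bags of four indices
#   pooled = F.embedding_bag(indices, weight, offsets)  # (2, 3), mean-pooled
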
- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: _embedding_bag_forward_only_cpu
    CUDA: _embedding_bag_forward_only_cuda
  autogen: _embedding_bag_forward_only.out

- func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)

# row_stack is the alias of vstack
- func: row_stack(Tensor[] tensors) -> Tensor

- func: row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)

- func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)

# To keep backward and forward compatibility, and to avoid ambiguity with the
# original signature above, scale_grad_by_freq, mode, sparse,
# per_sample_weights, and include_last_offset parameters do not have default
# values. Once the original signature is removed, default values can be added.
- func: embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)

- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: _embedding_bag_cpu
    CUDA: _embedding_bag_cuda
  autogen: _embedding_bag.out

- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
  dispatch:
    CompositeImplicitAutograd: _embedding_bag_backward_symint

- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
  dispatch:
    CompositeImplicitAutograd: _embedding_bag_sparse_backward_symint

- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
  dispatch:
    CPU: _embedding_bag_dense_backward_cpu
    CUDA: _embedding_bag_dense_backward_cuda
  autogen: _embedding_bag_dense_backward.out

- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
  dispatch:
    CPU: _embedding_bag_per_sample_weights_backward_cpu
    CUDA: _embedding_bag_per_sample_weights_backward_cuda
  autogen: _embedding_bag_per_sample_weights_backward.out

- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: empty_names
  autogen: empty.names_out

- func: empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    CPU: empty_cpu
    CUDA: empty_cuda
    MPS: empty_mps
    Meta: empty_meta_symint
    MkldnnCPU: empty_mkldnn
    SparseCPU, SparseCUDA, SparseMeta: empty_sparse
    SparseCsrCPU, SparseCsrCUDA: empty_sparse_compressed
    QuantizedCPU, QuantizedCUDA, QuantizedMeta: empty_unknown_quantized

# We do not make new_empty a composite that calls into new_empty_strided, as the
# strided version is significantly more difficult for different backends to implement
- func: new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    CompositeExplicitAutograd: new_empty_symint
  autogen: new_empty.out

- func: new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    CompositeExplicitAutogradNonFunctional: new_empty_strided_symint
  autogen: new_empty_strided.out

- func: new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: new_full
  autogen: new_full.out

- func: new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: new_zeros
  autogen: new_zeros.out

- func: new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: new_ones
  autogen: new_ones.out
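
# Illustrative Python for the new_* factory methods above (a sketch kept as
# comments, not part of the operator schema): they inherit dtype and device
# from self unless explicitly overridden.
#   import torch
#   base = torch.zeros(2, 2, dtype=torch.float64)
#   base.new_empty((1, 3)).dtype             # torch.float64, inherited
#   base.new_full((2,), 7.0)                 # tensor([7., 7.], dtype=float64)
#   base.new_ones((2,), dtype=torch.int32)   # explicit override
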
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
  dispatch:
    CPU: empty_affine_quantized_other_backends_stub
    QuantizedCPU, QuantizedCUDA: empty_affine_quantized
  autogen: _empty_affine_quantized.out

# it's a factory function receiving a tensor argument, thus overriding explicitly
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
  category_override: factory
  dispatch:
    CPU: empty_per_channel_affine_quantized_other_backends_stub
    QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized
  autogen: _empty_per_channel_affine_quantized.out

- func: resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    Meta: resize__symint
    CPU: resize_
    CUDA: resize_cuda_
    MPS: resize_mps_
    QuantizedCPU: quantized_resize_cpu_
    SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_
  autogen: resize, resize.out
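
# Illustrative Python for resize_ above (a sketch kept as comments, not part
# of the operator schema): resizing can grow the underlying storage, and any
# newly exposed elements are uninitialized.
#   import torch
#   t = torch.empty(0)
#   t.resize_(2, 3)   # now a 2x3 tensor with uninitialized values
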

# This is a utility function to enable users to resize the out tensor while
# registering kernels for out variants. Eventually, we can consider exposing
# `resize_output` as a public API to ship it with python op registration
# to make it easy to register out variants for ops.
- func: _resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: function
  dispatch:
    Meta: _resize_output_
  autogen: _resize_output, _resize_output.out

- func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  category_override: factory
  variants: function
  dispatch:
    QuantizedCPU, QuantizedCUDA: empty_quantized
  autogen: empty_quantized.out

- func: empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  device_guard: False

- func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: empty_like
    QuantizedCPU, QuantizedCUDA: empty_like_quantized
    SparseCPU, SparseCUDA, SparseMeta: empty_like_sparse_coo
    SparseCsrCPU, SparseCsrCUDA: empty_like_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: empty_like_nested
  autogen: empty_like.out

- func: empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: empty_strided_cpu
    CUDA: empty_strided_cuda
    MPS: empty_strided_mps
    Meta: empty_strided_meta_symint
    QuantizedCPU, QuantizedCUDA: empty_strided_unknown_quantized
  autogen: empty_strided.out
  tags: core

- func: erf(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: erf.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: erf_sparse
    SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr
  tags: [core, pointwise]

- func: erf_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: erf.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: erf_sparse_
    SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_
  tags: pointwise

- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: erf_out
    MPS: erf_out_mps
    SparseCPU, SparseCUDA: erf_sparse_out
    SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_out
  tags: pointwise

- func: erfc(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: erfc.out
  variants: function, method
  tags: pointwise

- func: erfc_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: erfc.out
  variants: function, method
  tags: pointwise

- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: erfc_out
  tags: pointwise

- func: exp(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: exp.out
  variants: function, method
  tags: [core, pointwise]

- func: exp_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: exp.out
  variants: function, method
  tags: pointwise

- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: exp_out
    MPS: exp_out_mps
  tags: pointwise

- func: exp2(Tensor self) -> Tensor
  structured_delegate: exp2.out
  variants: function, method
  tags: pointwise

- func: exp2_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: exp2.out
  variants: function, method
  tags: pointwise

- func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: exp2_out
    MPS: exp2_out_mps
  tags: pointwise

- func: expm1(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: expm1.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: expm1_sparse
    SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr
  tags: pointwise

- func: expm1_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: expm1.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: expm1_sparse_
    SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_
  tags: pointwise

- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: expm1_out
    MPS: expm1_out_mps
    SparseCPU, SparseCUDA: expm1_sparse_out
    SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out
  tags: pointwise

- func: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: expand
  tags: core

- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  device_check: NoCheck
  device_guard: False

# decomposes to eye.m
- func: eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: eye

- func: eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: eye

- func: eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: eye_out_cpu
    CUDA: eye_out_cuda
    MPS: eye_out_mps

- func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: eye_out_cpu
    CUDA: eye_out_cuda
    MPS: eye_out_mps
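
# Illustrative Python for the eye overloads above (a sketch kept as comments,
# not part of the operator schema): eye(n) produces a square identity and
# decomposes to the rectangular eye.m overload.
#   import torch
#   torch.eye(3)      # 3x3 identity
#   torch.eye(2, 4)   # 2x4, ones on the main diagonal
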
- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
  variants: function, method

- func: flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
  variants: function, method

- func: flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
  variants: function, method

- func: flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
  variants: function, method

- func: unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
  variants: function, method

- func: unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
  variants: function, method

- func: fill.Scalar(Tensor self, Scalar value) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: fill
  tags: core

- func: fill.Tensor(Tensor self, Tensor value) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: fill

- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: fill_
    MPS: fill_scalar_mps
    QuantizedCPU, QuantizedCUDA: fill_quantized_
    Meta: fill_meta_
    SparseCsrCPU, SparseCsrCUDA: fill_sparse_csr_
    NestedTensorCPU, NestedTensorCUDA: fill_nested_
  autogen: fill.Scalar_out

- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: fill_
    MPS: fill_tensor_mps_
    QuantizedCPU, QuantizedCUDA: fill_quantized_
    Meta: fill_meta_
    NestedTensorCPU, NestedTensorCUDA: fill_nested_
  autogen: fill.Tensor_out

- func: floor(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: floor.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: floor_sparse
    SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr
  tags: [core, pointwise]

- func: floor_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: floor.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: floor_sparse_
    SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_
  tags: pointwise

- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: floor_out
    MPS: floor_out_mps
    SparseCPU, SparseCUDA: floor_sparse_out
    SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_out
  tags: pointwise

- func: floor_divide(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: floor_divide
    MPS: floor_divide_mps
    SparseCPU, SparseCUDA: floor_divide_sparse

- func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: floor_divide_
    MPS: floor_divide_mps_
    SparseCPU, SparseCUDA: floor_divide_sparse_

- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: floor_divide_out
    MPS: floor_divide_out_mps
    SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim

- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: frac(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: frac.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: frac_sparse
    SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr
  tags: pointwise

- func: frac_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: frac.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: frac_sparse_
    SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_
  tags: pointwise

- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: frac_out
    MPS: frac_out_mps
    SparseCPU, SparseCUDA: frac_sparse_out
    SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_out
  tags: pointwise

- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: full
  autogen: full.names_out

- func: full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: full
  tags: core

- func: full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: full_out

- func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: full_like
  autogen: full_like.out

- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: from_file
  autogen: from_file.out

- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: gcd_out
  tags: pointwise

- func: gcd(Tensor self, Tensor other) -> Tensor
  structured_delegate: gcd.out
  variants: function, method
  tags: pointwise

- func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: gcd.out
  variants: function, method

- func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lcm_out
  tags: pointwise

- func: lcm(Tensor self, Tensor other) -> Tensor
  structured_delegate: lcm.out
  variants: function, method
  tags: pointwise

- func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: lcm.out
  variants: function, method

# NOTE [ grid_sampler Native Functions ]
# `grid_sampler` is _supposed to_ do all the shape checking and then dispatch to
# one of `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of
# which has the corresponding backward defined as native functions as well.
# However, we do shape checking everywhere for now since each of the mentioned
# functions can be called directly, which will lead to crashes otherwise.
# See https://github.com/pytorch/pytorch/issues/73187 for more information.
#
# There is also _grid_sampler_2d_cpu_fallback which is an
# implementation detail of grid_sampler_2d and is only exposed here for testing
# purposes.
#
# Additionally, arguments `padding_mode` and `interpolation_mode` are cast to
# enums defined in `native/GridSampler.h`. `cudnn_grid_sampler` doesn't take in
# `interpolation_mode` because it only supports Bilinear interpolation mode.
# Nor does it take in `align_corners` because it only supports the mode
# `align_corners = True`.
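
# Illustrative Python for the note above (a sketch kept as comments, not part
# of the operator schema): the public wrapper torch.nn.functional.grid_sample
# maps its string arguments onto the integer enums these entries receive.
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 1, 4, 4)
#   grid = torch.zeros(1, 2, 2, 2)   # grid coords in [-1, 1]; 0 = center
#   y = F.grid_sample(x, grid, mode='bilinear', padding_mode='zeros',
#                     align_corners=True)
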
- func: grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor

- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  dispatch:
    CPU, QuantizedCPU: grid_sampler_2d_cpu
    CUDA: grid_sampler_2d_cuda
    MPS: grid_sampler_2d_mps
  autogen: grid_sampler_2d.out
  tags: core

# `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
  dispatch:
    CPU: grid_sampler_2d_backward_cpu
    CUDA: grid_sampler_2d_backward_cuda
  autogen: grid_sampler_2d_backward.out

# See NOTE [ grid_sample CPU fallback ]
- func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback
  autogen: _grid_sampler_2d_cpu_fallback.out

- func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)

- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  dispatch:
    CPU: grid_sampler_3d_cpu
    CUDA: grid_sampler_3d_cuda
  autogen: grid_sampler_3d.out

# `grid_sampler_3d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
  dispatch:
    CPU: grid_sampler_3d_backward_cpu
    CUDA: grid_sampler_3d_backward_cuda
  autogen: grid_sampler_3d_backward.out

- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hann_window
  autogen: hann_window.out

- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hann_window
  autogen: hann_window.periodic_out

- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.out

- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.periodic_out

- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.periodic_alpha_out

- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.periodic_alpha_beta_out

- func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: kaiser_window
  autogen: kaiser_window.out

- func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: kaiser_window
  autogen: kaiser_window.periodic_out

- func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: kaiser_window
  autogen: kaiser_window.beta_out

- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor

- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor

- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU, CUDA: native_group_norm
    CompositeExplicitAutograd: math_group_norm
  autogen: native_group_norm.out
  tags: core

- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU, CUDA: native_group_norm_backward
  autogen: native_group_norm_backward.out
  tags: core

# Real to complex forward FFT
- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
  variants: function
  dispatch:
    CPU: _fft_r2c_mkl
    CUDA: _fft_r2c_cufft

- func: _fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: _fft_r2c_mkl_out
    CUDA: _fft_r2c_cufft_out

# Complex to real inverse FFT
- func: _fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
  variants: function
  dispatch:
    CPU: _fft_c2r_mkl
    CUDA: _fft_c2r_cufft

- func: _fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: _fft_c2r_mkl_out
    CUDA: _fft_c2r_cufft_out

# Standard complex to complex FFT (forward or backward)
- func: _fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
  variants: function
  dispatch:
    CPU: _fft_c2c_mkl
    CUDA: _fft_c2c_cufft

- func: _fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: _fft_c2c_mkl_out
    CUDA: _fft_c2c_cufft_out
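
# Illustrative Python for the FFT primitives above (a sketch kept as comments,
# not part of the operator schema): the public torch.fft module is the
# intended entry point; the assumed mapping is rfft -> r2c, irfft -> c2r,
# fft -> c2c.
#   import torch
#   x = torch.randn(8)
#   spec = torch.fft.rfft(x)            # real -> complex
#   back = torch.fft.irfft(spec, n=8)   # complex -> real
#   full = torch.fft.fft(spec)          # complex -> complex
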
- - func: _validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
- device_check: NoCheck
- variants: function
- dispatch:
- CPU: _validate_compressed_sparse_indices_cpu
- CUDA: _validate_compressed_sparse_indices_cuda
- - func: _cufft_get_plan_cache_size(int device_index) -> int
- - func: _cufft_get_plan_cache_max_size(int device_index) -> int
- - func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
- - func: _cufft_clear_plan_cache(int device_index) -> ()
- - func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: index.Tensor_out
- variants: function, method
- dispatch:
- QuantizedCPU: quantized_index
- tags: dynamic_output_shape
- # NB: This function is special-cased in tools/autograd/gen_variable_type.py
- # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
- # - Tensor Tensor::index(ArrayRef<TensorIndex> indices)
- # - Tensor Tensor::index(std::initializer_list<TensorIndex> indices)
- - func: index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck
- structured: True
- structured_inherits: TensorIteratorBase
- precomputed:
- - indices -> DimVector sizes, DimVector strides
- dispatch:
- CPU, CUDA, MPS: index_out
- - func: index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- variants: function
- precomputed:
- - dim -> int dim
- dispatch:
- CPU, CUDA: index_copy_out
- - func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
- variants: method
- structured_delegate: index_copy.out
- - func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
- variants: function, method
- structured_delegate: index_copy.out
- - func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
- variants: method
- - func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
- variants: function, method
- - func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
- device_check: NoCheck # delegate to _index_put_impl_, which leverages TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: index_put_
- autogen: index_put.out
- # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
- # - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Tensor const & rhs)
- # - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Scalar v)
- # - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Tensor const & rhs)
- # - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Scalar v)
- - func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
- device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: index_put
- - func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CPU, CUDA, MPS: _index_put_impl_
- QuantizedCPU: _index_put_impl_quantized_cpu_
- QuantizedCUDA: _index_put_impl_quantized_cuda_
- autogen: _index_put_impl, _index_put_impl.out
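- # Example (illustrative): accumulate=True makes index_put_ a scatter-add, which
- # stays well defined when indices repeat:
- #   import torch
- #   t = torch.zeros(4)
- #   t.index_put_((torch.tensor([1, 1, 3]),), torch.tensor([1., 2., 5.]), accumulate=True)
- #   # t is now tensor([0., 3., 0., 5.])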
- - func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
- variants: function
- - func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
- variants: function, method
- - func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
- variants: function
- structured: True
- dispatch:
- CPU, CUDA: isin_Tensor_Tensor_out
- - func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
- variants: function
- structured_delegate: isin.Tensor_Tensor_out
- - func: isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
- variants: function
- structured: True
- dispatch:
- CPU, CUDA: isin_Tensor_Scalar_out
- - func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
- variants: function
- structured_delegate: isin.Tensor_Scalar_out
- - func: isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
- variants: function
- structured: True
- dispatch:
- CPU, CUDA: isin_Scalar_Tensor_out
- - func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
- variants: function
- structured_delegate: isin.Scalar_Tensor_out
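- # Example (illustrative): isin tests elementwise membership, with invert
- # flipping the result:
- #   import torch
- #   torch.isin(torch.tensor([1, 2, 3]), torch.tensor([2, 3, 5]))
- #   # tensor([False,  True,  True])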
- - func: isnan(Tensor self) -> Tensor
- variants: function, method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CPU, CUDA, MPS: isnan
- SparseCPU, SparseCUDA: isnan_sparse
- SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr
- autogen: isnan.out
- tags: [core, pointwise]
- - func: is_distributed(Tensor self) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- - func: is_floating_point(Tensor self) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- manual_cpp_binding: True
- - func: is_complex(Tensor self) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- manual_cpp_binding: True
- - func: is_conj(Tensor self) -> bool
- variants: function, method
- device_guard: False
- manual_cpp_binding: True
- - func: _is_zerotensor(Tensor self) -> bool
- variants: function, method
- device_guard: False
- manual_cpp_binding: True
- - func: is_neg(Tensor self) -> bool
- variants: function, method
- device_guard: False
- manual_cpp_binding: True
- - func: isreal(Tensor self) -> Tensor
- variants: function, method
- - func: is_nonzero(Tensor self) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- - func: is_same_size(Tensor self, Tensor other) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- dispatch:
- NestedTensorCPU, NestedTensorCUDA: nested_is_same_size
- CompositeExplicitAutograd: is_same_size
- - func: is_signed(Tensor self) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- manual_cpp_binding: True
- - func: is_inference(Tensor self) -> bool
- variants: function, method
- device_check: NoCheck
- device_guard: False
- manual_cpp_binding: True
- - func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
- - func: kron(Tensor self, Tensor other) -> Tensor
- variants: function, method
- - func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- - func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: kthvalue
- - func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- dispatch:
- CPU: kthvalue_out_cpu
- CUDA: kthvalue_out_cuda
- - func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- - func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
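- # Example (illustrative): kthvalue returns the k-th smallest element (1-based)
- # along dim, together with its index:
- #   import torch
- #   v, i = torch.kthvalue(torch.tensor([3., 1., 2.]), k=2)
- #   # v = tensor(2.), i = tensor(2)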
- - func: layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
- dispatch:
- CompositeImplicitAutograd: layer_norm_symint
- - func: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
- dispatch:
- CPU: layer_norm_cpu
- CUDA: layer_norm_cuda
- MPS: layer_norm_mps
- CompositeExplicitAutograd: math_native_layer_norm
- NestedTensorCPU, NestedTensorCUDA: nested_layer_norm
- autogen: native_layer_norm.out
- tags: core
- - func: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- dispatch:
- CPU: layer_norm_backward_cpu
- CUDA: layer_norm_backward_cuda
- MPS: layer_norm_backward_mps
- autogen: native_layer_norm_backward.out
- tags: core
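- # Example (illustrative): layer_norm normalizes over the trailing
- # normalized_shape dimensions:
- #   import torch
- #   import torch.nn.functional as F
- #   x = torch.randn(2, 5, 10)
- #   y = F.layer_norm(x, normalized_shape=(10,))  # stats per (2, 5) slice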
- - func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: nan_to_num
- SparseCPU, SparseCUDA: nan_to_num_sparse
- tags: pointwise
- - func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: nan_to_num_
- SparseCPU, SparseCUDA: nan_to_num_sparse_
- tags: pointwise
- - func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, CUDA: nan_to_num_out
- MPS: nan_to_num_out_mps
- SparseCPU, SparseCUDA: nan_to_num_sparse_out
- tags: pointwise
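- # Example (illustrative): nan_to_num substitutes NaN/+inf/-inf; unset arguments
- # default to 0 and the dtype's finite extremes:
- #   import torch
- #   t = torch.tensor([float('nan'), float('inf'), -float('inf')])
- #   torch.nan_to_num(t, nan=0.0, posinf=1.0, neginf=-1.0)
- #   # tensor([ 0.,  1., -1.])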
- - func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
- python_module: nn
- dispatch:
- CompositeImplicitAutograd: linear
- NestedTensorCPU, NestedTensorCUDA: nested_linear
- MPS: _mps_linear
- - func: linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- dispatch:
- NestedTensorCPU, NestedTensorCUDA: nested_linear_backward
- MPS: mps_linear_backward
- autogen: linear_backward.out
- - func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
- python_module: nn
- dispatch:
- CompositeExplicitAutograd: linear_out
- - func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
- python_module: nn
- dispatch:
- MkldnnCPU: mkldnn_linear
- autogen: mkldnn_linear.out
- - func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
- dispatch:
- MkldnnCPU: mkldnn_linear_backward_input
- autogen: mkldnn_linear_backward_input.out
- - func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
- dispatch:
- MkldnnCPU: mkldnn_linear_backward_weights
- autogen: mkldnn_linear_backward_weights.out
- - func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- dispatch:
- MkldnnCPU: mkldnn_linear_backward
- autogen: mkldnn_linear_backward.out
- - func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
- - func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
- - func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
- - func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
- - func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
- - func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
- - func: fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
- - func: fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
- - func: ldexp.Tensor(Tensor self, Tensor other) -> Tensor
- variants: function, method
- - func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- variants: function, method
- tags: pointwise
- - func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- tags: pointwise
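- # Example (illustrative): ldexp computes self * 2**other, composing a mantissa
- # with an exponent:
- #   import torch
- #   torch.ldexp(torch.tensor([1.0]), torch.tensor([3]))  # tensor([8.])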
- - func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: linspace
- - func: linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, Meta: linspace_out
- CUDA: linspace_cuda_out
- MPS: linspace_out_mps
- - func: log(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: log.out
- variants: function, method
- tags: [core, pointwise]
- - func: log_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: log.out
- variants: function, method
- tags: pointwise
- - func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: log_out
- MPS: log_out_mps
- tags: pointwise
- - func: log10(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: log10.out
- variants: function, method
- tags: pointwise
- - func: log10_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: log10.out
- variants: function, method
- tags: pointwise
- - func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: log10_out
- MPS: log10_out_mps
- tags: pointwise
- - func: log1p(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: log1p.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: log1p_sparse
- SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr
- tags: pointwise
- - func: log1p_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: log1p.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: log1p_sparse_
- SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_
- tags: pointwise
- - func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: log1p_out
- MPS: log1p_out_mps
- SparseCPU, SparseCUDA: log1p_sparse_out
- SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_out
- tags: pointwise
- - func: log2(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: log2.out
- variants: function, method
- tags: pointwise
- - func: log2_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: log2.out
- variants: function, method
- tags: pointwise
- - func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: log2_out
- MPS: log2_out_mps
- tags: pointwise
- - func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: logaddexp_out
- MPS: logaddexp_out_mps
- tags: pointwise
- - func: logaddexp(Tensor self, Tensor other) -> Tensor
- variants: method, function
- structured_delegate: logaddexp.out
- tags: pointwise
- - func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: logaddexp2_out
- MPS: logaddexp2_out_mps
- tags: pointwise
- - func: logaddexp2(Tensor self, Tensor other) -> Tensor
- variants: method, function
- structured_delegate: logaddexp2.out
- tags: pointwise
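- # Example (illustrative): logaddexp(x, y) = log(exp(x) + exp(y)) evaluated
- # stably, which matters when both operands are very negative:
- #   import torch
- #   a = torch.tensor(-1000.)
- #   torch.log(torch.exp(a) + torch.exp(a))  # -inf (underflows)
- #   torch.logaddexp(a, a)                   # tensor(-999.3069)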
- - func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: xlogy.OutTensor
- variants: function, method
- tags: pointwise
- - func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CompositeExplicitAutograd: xlogy
- tags: pointwise
- - func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: xlogy
- tags: pointwise
- # xlogy: inplace variant
- - func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: xlogy.OutTensor
- tags: pointwise
- - func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: xlogy_
- # xlogy: out variant
- - func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- variants: function
- dispatch:
- CPU, CUDA: xlogy_out
- tags: pointwise
- - func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CompositeExplicitAutograd: xlogy_out
- tags: pointwise
- - func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CompositeExplicitAutograd: xlogy_out
- tags: pointwise
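- # Example (illustrative): xlogy(x, y) = x * log(y) with the convention that the
- # result is 0 wherever x == 0, as in entropy-style sums:
- #   import torch
- #   torch.xlogy(torch.tensor([0., 2.]), torch.tensor([0., 4.]))
- #   # tensor([0.0000, 2.7726])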
- - func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: logspace
- - func: logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, Meta: logspace_out
- CUDA: logspace_cuda_out
- # log_softmax allows a positional dtype, unlike most operators, because making dtype keyword-only would be BC-breaking when loading jit models.
- - func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
- variants: function, method
- - func: log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- dispatch:
- CompositeExplicitAutograd: log_softmax_out
- - func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
- variants: function, method
- - func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
- structured_delegate: _log_softmax.out
- tags: core
- - func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU: log_softmax_cpu_out
- CUDA: log_softmax_cuda_out
- MPS: log_softmax_mps_out
- - func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
- structured_delegate: _log_softmax_backward_data.out
- - func: _log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU: log_softmax_backward_cpu_out
- CUDA: log_softmax_backward_cuda_out
- MPS: log_softmax_backward_mps_out
- - func: _logcumsumexp(Tensor self, int dim) -> Tensor
- dispatch:
- CPU: _logcumsumexp_cpu
- CUDA: _logcumsumexp_cuda
- - func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: _logcumsumexp_out_cpu
- CUDA: _logcumsumexp_out_cuda
- - func: logcumsumexp(Tensor self, int dim) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: logcumsumexp
- - func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: logcumsumexp_out
- - func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
- variants: function, method
- - func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- - func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: logsumexp
- - func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- dispatch:
- # calls squeeze
- CompositeExplicitAutogradNonFunctional: logsumexp_out
- - func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
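- # Example (illustrative): logsumexp reduces with log(sum(exp(x))), computed via
- # the max-subtraction trick for stability:
- #   import torch
- #   torch.logsumexp(torch.tensor([[0., 1., 2.]]), dim=1)  # tensor([2.4076])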
- - func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
- - func: matmul(Tensor self, Tensor other) -> Tensor
- variants: function, method
- dispatch:
- CompositeImplicitAutograd: matmul
- NestedTensorCPU, NestedTensorCUDA: matmul_nested
- - func: matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
- dispatch:
- NestedTensorCPU, NestedTensorCUDA: matmul_backward_nested
- autogen: matmul_backward.out
- - func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeImplicitAutograd: matmul_out
- NestedTensorCPU, NestedTensorCUDA: matmul_out_nested
- # Alias for linalg.matrix_power
- - func: matrix_power(Tensor self, int n) -> Tensor
- variants: function, method
- # Alias for linalg.matrix_power
- - func: matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
- # Alias for linalg.matrix_exp
- - func: matrix_exp(Tensor self) -> Tensor
- variants: function, method
- # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
- - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
- # DEPRECATED: Use torch.aminmax instead
- - func: _aminmax(Tensor self) -> (Tensor, Tensor)
- dispatch:
- CPU, CUDA: _aminmax_all
- autogen: _aminmax.out
- # DEPRECATED: Use torch.aminmax instead
- - func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
- dispatch:
- CPU, CUDA: _aminmax
- autogen: _aminmax.dim_out
- - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
- device_check: NoCheck # TensorIterator
- structured_delegate: aminmax.out
- variants: function, method
- - func: aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
- device_check: NoCheck # TensorIterator
- structured: True
- dispatch:
- CPU, CUDA: aminmax_out
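- # Example (illustrative): aminmax fuses min and max into a single pass:
- #   import torch
- #   mn, mx = torch.aminmax(torch.tensor([[1, 5], [3, 2]]), dim=1)
- #   # mn = tensor([1, 2]), mx = tensor([5, 3])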
- - func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
- dispatch:
- CPU, CUDA: _compute_linear_combination
- - func: _compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, CUDA: _compute_linear_combination_out
- - func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- device_check: NoCheck # TensorIterator
- structured_delegate: max.dim_max
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: qmax
- tags: core
- - func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
- device_check: NoCheck # TensorIterator
- structured: True
- precomputed:
- - dim -> int dim
- dispatch:
- CPU, CUDA: max_out
- MPS: max_out_mps
- - func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
- device_check: NoCheck # TensorIterator
- - func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
- variants: function
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeImplicitAutograd: value_selecting_reduction_backward_symint
- - func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
- variants: function, method
- structured_delegate: amax.out
- tags: core
- - func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU, CUDA: amax_out
- MPS: amax_out_mps
- # Return: (Tensor output, Tensor indices)
- - func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
- - func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
- - func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- CompositeImplicitAutograd: max_pool2d
- MPS: mps_max_pool2d
- - func: max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- MPS: mps_max_pool2d_backward
- autogen: max_pool2d_backward.out
- - func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- MkldnnCPU: mkldnn_max_pool2d
- autogen: mkldnn_max_pool2d.out
- - func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- MkldnnCPU: mkldnn_max_pool2d_backward
- autogen: mkldnn_max_pool2d_backward.out
- - func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- MkldnnCPU: mkldnn_max_pool3d
- autogen: mkldnn_max_pool3d.out
- - func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- MkldnnCPU: mkldnn_max_pool3d_backward
- autogen: mkldnn_max_pool3d_backward.out
- - func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- QuantizedCPU: quantized_max_pool1d
- autogen: quantized_max_pool1d.out
- - func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
- dispatch:
- QuantizedCPU: quantized_max_pool2d
- QuantizedCUDA: quantized_max_pool2d_cudnn
- autogen: quantized_max_pool2d.out
- - func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
- # The CPU and GPU dispatch variants are named unusually here because otherwise
- # they would collide with names in the C++ namespace
- - func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: mean
- # Under the normal naming convention this would be `mean.out`, but that name is already taken, so this overload is named `mean.dtype_out` instead.
- # FIXME: fix CI jobs and re-enable this
- #- func: mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- # device_check: NoCheck # TensorIterator
- # dispatch:
- # CompositeExplicitAutograd: mean_dtype_out
- - func: mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- structured_delegate: mean.out
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- QuantizedCPU: mean_quantized_cpu
- tags: core
- - func: mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- structured: True
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: mean_out
- MPS: mean_out_mps
- QuantizedCPU: mean_out_quantized_cpu
- - func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- - func: nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- device_check: NoCheck # Composite
- variants: function, method
- - func: nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # Composite
- - func: median(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CPU: median_cpu
- CUDA: median_cuda
- MPS: median_mps
- autogen: median.out
- - func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: median
- - func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- dispatch:
- CPU: median_out_cpu
- CUDA: median_out_cuda
- MPS: median_out_mps
- - func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- - func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- - func: nanmedian(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CPU: nanmedian_cpu
- CUDA: nanmedian_cuda
- autogen: nanmedian.out
- - func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: nanmedian
- - func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- dispatch:
- CPU: nanmedian_out_cpu
- CUDA: nanmedian_out_cuda
- - func: nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- - func: nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- - func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- device_check: NoCheck # TensorIterator
- structured_delegate: min.dim_min
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: qmin
- tags: core
- - func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
- device_check: NoCheck # TensorIterator
- structured: True
- precomputed:
- - dim -> int dim
- dispatch:
- CPU, CUDA: min_out
- MPS: min_out_mps
- - func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
- device_check: NoCheck # TensorIterator
- - func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
- variants: function, method
- structured_delegate: amin.out
- tags: core
- - func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU, CUDA: amin_out
- MPS: amin_out_mps
- # TODO: Add this function to the MPS dispatch key so that we can avoid declaring it in
- # native_functions.yaml
- # https://github.com/pytorch/pytorch/issues/77394
- - func: _mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
- dispatch:
- MPS: _mps_convolution
- autogen: _mps_convolution.out
- - func: mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- dispatch:
- MPS: mps_convolution_backward
- autogen: mps_convolution_backward.out
- - func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
- dispatch:
- CompositeExplicitAutograd: mkldnn_convolution
- autogen: mkldnn_convolution.out
- - func: mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
- dispatch:
- CPU: mkldnn_rnn_layer
- autogen: mkldnn_rnn_layer.out
- - func: mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
- dispatch:
- CPU: mkldnn_rnn_layer_backward
- autogen: mkldnn_rnn_layer_backward.out
- - func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
- dispatch:
- CUDA: miopen_batch_norm
- autogen: miopen_batch_norm.out
- - func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
- dispatch:
- CUDA: miopen_batch_norm_backward
- autogen: miopen_batch_norm_backward.out
- - func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
- dispatch:
- CUDA: miopen_convolution
- autogen: miopen_convolution.out
- - func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
- dispatch:
- CUDA: miopen_convolution_transpose
- autogen: miopen_convolution_transpose.out
- - func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
- dispatch:
- CUDA: miopen_depthwise_convolution
- autogen: miopen_depthwise_convolution.out
- - func: miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
- dispatch:
- CUDA: miopen_convolution_relu
- - func: miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
- dispatch:
- CUDA: miopen_convolution_add_relu
- - func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- dispatch:
- CUDA: miopen_rnn
- autogen: miopen_rnn.out
- - func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
- dispatch:
- CUDA: miopen_rnn_backward
- autogen: miopen_rnn_backward.out
- - func: mm(Tensor self, Tensor mat2) -> Tensor
- structured_delegate: mm.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: _sparse_mm
- SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm
- tags: core
- - func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU: mm_out_cpu
- CUDA: mm_out_cuda
- MPS: mm_out_mps
- SparseCPU, SparseCUDA: _sparse_mm_out
- SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out
- - func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor
- python_module: sparse
- - func: _sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
- python_module: sparse
- - func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
- dispatch:
- SparseCPU: sparse_sparse_matmul_cpu
- SparseCUDA: sparse_sparse_matmul_cuda
- autogen: _sparse_sparse_matmul.out
- - func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- dispatch:
- CPU, CUDA: mode
- - func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- dispatch:
- CompositeExplicitAutograd: mode_out
- - func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- variants: function, method
- - func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- - func: mul.Tensor(Tensor self, Tensor other) -> Tensor
- device_check: NoCheck # TensorIterator
- structured_delegate: mul.out
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: mul_sparse
- SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr
- MkldnnCPU: mkldnn_mul
- ZeroTensor: mul_zerotensor
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Tensor
- tags: [core, pointwise]
- - func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured_delegate: mul.out
- variants: method
- dispatch:
- SparseCPU, SparseCUDA: mul_sparse_
- SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr_
- MkldnnCPU: mkldnn_mul_
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Tensor
- tags: pointwise
- - func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: mul_out
- MPS: mul_out_mps
- SparseCPU: mul_out_sparse_cpu
- SparseCUDA: mul_out_sparse_cuda
- SparseCsrCPU, SparseCsrCUDA: mul_out_sparse_csr
- MkldnnCPU: mkldnn_mul_out
- tags: pointwise
- # For C++ only, until we have conversion from C++ numbers to Tensor
- - func: mul.Scalar(Tensor self, Scalar other) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: mul
- SparseCsrCPU, SparseCsrCUDA: mul_scalar_sparse_csr
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Scalar
- tags: [core, pointwise]
- - func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: mul_
- SparseCsrCPU, SparseCsrCUDA: mul__scalar_sparse_csr
- NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Scalar
- autogen: mul.Scalar_out
- tags: pointwise
- # multiply, alias for mul
- - func: multiply.Tensor(Tensor self, Tensor other) -> Tensor
- variants: function, method
- - func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
- variants: method
- - func: multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- - func: multiply.Scalar(Tensor self, Scalar other) -> Tensor
- variants: function, method
- - func: multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- variants: method
- - func: mv(Tensor self, Tensor vec) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: mv
- SparseCPU, SparseCUDA: mv_sparse
- - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: mv_out
- - func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU, CUDA: mvlgamma_out
- tags: pointwise
- - func: mvlgamma(Tensor self, int p) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: mvlgamma
- tags: pointwise
- - func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: mvlgamma_
- tags: pointwise
- - func: narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
- variants: function, method
- dispatch:
- CPU: narrow_copy_dense_cpu
- SparseCPU, SparseCUDA: narrow_copy_sparse
- CompositeExplicitAutogradNonFunctional: narrow_copy_dense_symint
- tags: view_copy
- - func: narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: narrow_copy_dense_cpu_out
- - func: narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
- variants: function, method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeImplicitAutograd: narrow_symint
- - func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
- variants: function, method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeImplicitAutograd: narrow_tensor_symint
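- # Example (illustrative): narrow returns a view, while narrow_copy materializes
- # an independent tensor:
- #   import torch
- #   t = torch.arange(10)
- #   v = t.narrow(0, 2, 3)       # tensor([2, 3, 4]), shares storage with t
- #   c = t.narrow_copy(0, 2, 3)  # same values, fresh storage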
- - func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
- dispatch:
- CPU: batch_norm_cpu
- CUDA: batch_norm_cuda
- MPS: batch_norm_mps
- MkldnnCPU: mkldnn_batch_norm
- tags: core
- - func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- dispatch:
- CUDA: batch_norm_cuda_out
- MPS: batch_norm_mps_out
- CPU: batch_norm_cpu_out
- # TODO: In two weeks, we should make native_batch_norm composite implicit so that this corrected schema propagates correctly through our dispatching
- - func: _native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
- dispatch:
- CPU: _batch_norm_legit_cpu
- CUDA: _batch_norm_legit_cuda
- MPS: _batch_norm_legit_mps
- MkldnnCPU: _mkldnn_batch_norm_legit
- autogen: _native_batch_norm_legit_functional
- - func: _native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
- dispatch:
- CPU: _batch_norm_legit_cpu_out
- CUDA: _batch_norm_legit_cuda_out
- MPS: _batch_norm_legit_mps_out
- - func: _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
- dispatch:
- CPU: _batch_norm_legit_no_stats_cpu
- CUDA: _batch_norm_legit_no_stats_cuda
- MPS: _batch_norm_legit_no_stats_mps
- MkldnnCPU: _mkldnn_batch_norm_legit_no_stats
- tags: core
- - func: _native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- dispatch:
- CPU: _batch_norm_legit_no_stats_cpu_out
- CUDA: _batch_norm_legit_no_stats_cuda_out
- MPS: _batch_norm_legit_no_stats_mps_out
- - func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
- dispatch:
- CUDA: batch_norm_stats_cuda
- autogen: batch_norm_stats.out
- - func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
- dispatch:
- CUDA: batch_norm_elemt_cuda
- - func: batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CUDA: batch_norm_elemt_cuda_out
- # for backward compatibility
- - func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
- dispatch:
- CUDA: batch_norm_gather_stats_cuda
- autogen: batch_norm_gather_stats.out
- - func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
- dispatch:
- CUDA: batch_norm_gather_stats_with_counts_cuda
- autogen: batch_norm_gather_stats_with_counts.out
- - func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- dispatch:
- CPU: batch_norm_backward_cpu
- CUDA: batch_norm_backward_cuda
- MPS: batch_norm_backward_mps
- MkldnnCPU: mkldnn_batch_norm_backward
- autogen: native_batch_norm_backward.out
- - func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
- dispatch:
- CUDA: batch_norm_backward_reduce_cuda
- autogen: batch_norm_backward_reduce.out
- - func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
- dispatch:
- CUDA: batch_norm_backward_elemt_cuda
- autogen: batch_norm_backward_elemt.out
- - func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
- dispatch:
- CPU: batch_norm_update_stats_cpu
- CUDA: batch_norm_update_stats_cuda
- autogen: batch_norm_update_stats.out
- - func: is_vulkan_available() -> bool
- - func: _nnpack_available() -> bool
- - func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutograd: _nnpack_spatial_convolution
- autogen: _nnpack_spatial_convolution.out
- - func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeExplicitAutograd: ones
- autogen: ones.names_out
- - func: ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: ones
- - func: ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: ones_out
- - func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- dispatch:
- # NB: Although this composite mutates on the inside, it is
- # non-differentiable so NonFunctional doesn't apply
- CompositeExplicitAutograd: ones_like
- NestedTensorCPU, NestedTensorCUDA: ones_like
- autogen: ones_like.out
- - func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
- - func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
- - func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor
- dispatch:
- CompositeExplicitAutograd: _euclidean_dist
- autogen: _euclidean_dist.out
- - func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
- dispatch:
- CPU, CUDA: _cdist_forward
- MPS: _cdist_forward_mps
- autogen: _cdist_forward.out
- - func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
- dispatch:
- CPU, CUDA: _cdist_backward
- autogen: _cdist_backward.out
- - func: pdist(Tensor self, float p=2) -> Tensor
- - func: _pdist_forward(Tensor self, float p=2) -> Tensor
- dispatch:
- CPU, CUDA: _pdist_forward
- autogen: _pdist_forward.out
- - func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
- dispatch:
- CPU, CUDA: _pdist_backward
- autogen: _pdist_backward.out
- - func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
- variants: function
- - func: permute(Tensor(a) self, int[] dims) -> Tensor(a)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: permute
- MPS: permute_mps
- SparseCPU, SparseCUDA: permute_sparse_coo
- tags: core
- - func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
- variants: function, method
- - func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
- variants: function, method
- # moveaxis, alias for movedim
- - func: moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
- variants: function, method
- - func: moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
- variants: function, method
- # Only exposed from C++ -- in Python,
- # we expose it as an attribute `T`, not a function.
- #
- # I'd like to name this "T" in C++ too, but
- # calling a native function "T" causes undefined
- # behavior on Windows, for reasons I don't understand
- # (maybe related to capital letter collation somehow...)
- - func: numpy_T(Tensor(a) self) -> Tensor(a)
- variants: method
- # Exposed on Python as an attribute 'H'
- - func: matrix_H(Tensor(a) self) -> Tensor(a)
- variants: method
- # Exposed on Python as an attribute 'mT'
- - func: mT(Tensor(a) self) -> Tensor(a)
- variants: method
- # Exposed on Python as an attribute 'mH'
- - func: mH(Tensor(a) self) -> Tensor(a)
- variants: method
- - func: adjoint(Tensor(a) self) -> Tensor(a)
- variants: function, method
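- # Example (sketch): these entries back the Tensor attributes of the same names;
- # mH is the conjugate transpose of the last two dims:
- #   import torch
- #   a = torch.randn(2, 3, dtype=torch.complex64)
- #   a.mT.shape                      # torch.Size([3, 2])
- #   torch.equal(a.mH, a.mT.conj())  # True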
- - func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
- dispatch:
- CPU: pixel_shuffle_cpu
- CompositeExplicitAutogradNonFunctional: math_pixel_shuffle
- autogen: pixel_shuffle.out
- - func: pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
- dispatch:
- CPU: pixel_unshuffle_cpu
- CompositeExplicitAutogradNonFunctional: math_pixel_unshuffle
- autogen: pixel_unshuffle.out
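- # Example (illustrative): pixel_shuffle trades channels for spatial resolution,
- # (N, C*r^2, H, W) -> (N, C, H*r, W*r); pixel_unshuffle inverts it:
- #   import torch
- #   x = torch.randn(1, 8, 2, 2)
- #   torch.pixel_shuffle(x, upscale_factor=2).shape  # torch.Size([1, 2, 4, 4])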
- - func: channel_shuffle(Tensor self, int groups) -> Tensor
- dispatch:
- CPU: channel_shuffle
- QuantizedCPU: channel_shuffle_quantized_cpu
- autogen: channel_shuffle.out
- - func: native_channel_shuffle(Tensor self, int groups) -> Tensor
- dispatch:
- CPU: channel_shuffle_cpu
- CompositeImplicitAutograd: math_channel_shuffle
- - func: is_pinned(Tensor self, Device? device=None) -> bool
- variants: method
- dispatch:
- CUDA: is_pinned_cuda
- MPS: is_pinned_mps
- CompositeExplicitAutograd: is_pinned_default
- # TODO: add a copy kwarg that guarantees that the tensor is put into fresh
- # pinned memory
- - func: pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
- variants: method
- # Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor
- - func: _pin_memory(Tensor self, Device? device=None) -> Tensor
- dispatch:
- CUDA: _pin_memory_cuda
- MPS: _pin_memory_mps
- autogen: _pin_memory.out
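- # Example (sketch): pinned (page-locked) host memory enables asynchronous
- # host-to-device copies:
- #   import torch
- #   p = torch.randn(1024).pin_memory()
- #   p.is_pinned()  # True
- #   if torch.cuda.is_available():
- #       d = p.to('cuda', non_blocking=True)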
- - func: pinverse(Tensor self, float rcond=1e-15) -> Tensor
- variants: function, method
- - func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
- variants: function
- - func: rad2deg(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: rad2deg
- SparseCPU, SparseCUDA: rad2deg_sparse
- SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr
- - func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: rad2deg_
- SparseCPU, SparseCUDA: rad2deg_sparse_
- SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_
- - func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: rad2deg_out
- SparseCPU, SparseCUDA: rad2deg_sparse_out
- SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_out
- - func: deg2rad(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: deg2rad
- SparseCPU, SparseCUDA: deg2rad_sparse
- SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr
- tags: pointwise
- - func: deg2rad_(Tensor(a!) self) -> Tensor(a!)
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: deg2rad_
- SparseCPU, SparseCUDA: deg2rad_sparse_
- SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_
- tags: pointwise
- - func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CompositeExplicitAutograd: deg2rad_out
- SparseCPU, SparseCUDA: deg2rad_sparse_out
- SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_out
- tags: pointwise
- - func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeExplicitAutograd: scalar_tensor
- autogen: scalar_tensor.out
- tags: core
- - func: rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeExplicitAutograd: rand
- autogen: rand.names_out
- tags: nondeterministic_seeded
- - func: rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_check: NoCheck
- device_guard: False
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: rand
- autogen: rand.generator_with_names_out
- - func: rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: rand
- - func: rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: rand
- - func: rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: rand_out
- - func: rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- tags: nondeterministic_seeded
- - func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- # NB: Although this composite mutates on the inside, it is
- # non-differentiable so NonFunctional doesn't apply
- CompositeExplicitAutograd: rand_like
- autogen: rand_like.out
- - func: randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint
- - func: randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint
- - func: randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint
- - func: randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint
- - func: randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint_out
- - func: randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint_out
- - func: randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint_out
- - func: randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
- tags: nondeterministic_seeded
- dispatch:
- CompositeExplicitAutograd: randint_out
- - func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- # NB: Although this composite mutates on the inside, it is
- # non-differentiable so NonFunctional doesn't apply
- CompositeExplicitAutograd: randint_like
- autogen: randint_like.out
- - func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- tags: nondeterministic_seeded
- dispatch:
- # NB: Although this composite mutates on the inside, it is
- # non-differentiable so NonFunctional doesn't apply
- CompositeExplicitAutograd: randint_like
- autogen: randint_like.low_dtype_out
- func: randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randn
- func: randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randn
- func: randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: randn
  autogen: randn.names_out
- func: randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: randn
  autogen: randn.generator_with_names_out
- func: randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: randn_like
  autogen: randn_like.out
- func: randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randperm
- func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randperm
- func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randperm_out
- func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CPU: randperm_out_cpu
    CUDA: randperm_out_cuda
    MPS: randperm_out_mps
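# Example: randperm is the usual way to shuffle reproducibly; a minimal
# Python sketch, assuming the standard torch API (illustrative only):
#
#   g = torch.Generator().manual_seed(42)
#   idx = torch.randperm(data.size(0), generator=g)  # dtype defaults to long
#   shuffled = data[idx]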
- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: range
- func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: range
- func: range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: range_out_no_step
- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: range_out
    CUDA: range_cuda_out
    MPS: range_mps_out
  cpp_no_default_args: ['step']
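# Example: unlike arange, range treats `end` as inclusive, and the Python
# torch.range has long been deprecated in favor of torch.arange. A minimal
# sketch, assuming standard semantics (illustrative only):
#
#   torch.arange(0, 5)   # tensor([0, 1, 2, 3, 4]), half-open [0, 5)
#   torch.range(0, 5)    # tensor([0., 1., 2., 3., 4., 5.]), inclusive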
- func: ravel(Tensor(a) self) -> Tensor(a)
  variants: function, method
- func: reciprocal(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: reciprocal.out
  variants: function, method
  tags: [core, pointwise]
- func: reciprocal_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: reciprocal.out
  variants: function, method
  tags: pointwise
- func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: reciprocal_out
    MPS: reciprocal_out_mps
  tags: pointwise
- func: neg(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: neg.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: neg_sparse
    SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg
  tags: [core, pointwise]
- func: neg_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: neg.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: neg_sparse_
    SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg_
  tags: pointwise
- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: neg_out
    MPS: neg_out_mps
    SparseCPU, SparseCUDA: neg_out_sparse
    SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_out
  tags: pointwise
# Alias for neg
- func: negative(Tensor self) -> Tensor
  variants: function, method
- func: negative_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: repeat(Tensor self, SymInt[] repeats) -> Tensor
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  dispatch:
    CompositeExplicitAutograd: repeat
    MPS: repeat_mps
  autogen: repeat.out
  tags: core
- func: repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
  variants: function
  dispatch:
    CPU: repeat_interleave_cpu
    CUDA: repeat_interleave_cuda
    MPS: repeat_interleave_mps
  tags: dynamic_output_shape
  autogen: repeat_interleave.Tensor_out
- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
  variants: function, method
- func: repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
  variants: function, method
  dispatch:
    CompositeImplicitAutograd: repeat_interleave_symint
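# Example: repeat tiles the whole tensor, while repeat_interleave repeats
# individual elements; a minimal Python sketch, assuming the standard torch
# API (illustrative only):
#
#   x = torch.tensor([1, 2, 3])
#   x.repeat(2)                # tensor([1, 2, 3, 1, 2, 3])
#   x.repeat_interleave(2)     # tensor([1, 1, 2, 2, 3, 3])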
- func: reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: reshape_symint
    CompositeImplicitAutogradNestedTensor: reshape_nested
- func: _reshape_copy(Tensor self, SymInt[] size) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _reshape_copy_symint
# NOTE [ _reshape_alias ]: this op is meant to be used only in the
# implementation of reshape. It is not user-facing, hence the leading
# underscore. Please don't use it anywhere else.
- func: _reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, ZeroTensor, MPS: _reshape_alias
    # We don't need to support mkldnn since this is handled explicitly by the reshape operator.
- func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_reshape
  autogen: _mkldnn_reshape.out
- func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: reshape_as
    CompositeImplicitAutogradNestedTensor: reshape_as_nested
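# Example: reshape returns a view when the requested shape is compatible with
# the input's strides and otherwise copies, which is why its implementation
# needs the _reshape_alias/_reshape_copy helpers above. A minimal Python
# sketch, assuming standard semantics (illustrative only):
#
#   x = torch.arange(6)
#   v = x.reshape(2, 3)                  # contiguous -> view, shares storage
#   w = x.reshape(2, 3).t().reshape(6)   # non-viewable layout -> copy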
- func: round(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: round.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: round_sparse
    SparseCsrCPU, SparseCsrCUDA: round_sparse_csr
  tags: pointwise
- func: round_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: round.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: round_sparse_
    SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_
  tags: pointwise
- func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU: round_out
    CUDA: round_out
    MPS: round_out_mps
    SparseCPU, SparseCUDA: round_sparse_out
    SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_out
  tags: pointwise
- func: round.decimals(Tensor self, *, int decimals) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: round.decimals_out
  variants: function, method
  tags: pointwise
- func: round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: round.decimals_out
  variants: function, method
  tags: pointwise
- func: round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU: round_decimals_out
    CUDA: round_decimals_out
  tags: pointwise
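# Example: round uses round-half-to-even ("banker's rounding"), and the
# decimals overload rounds to a given number of decimal places. A minimal
# Python sketch, assuming standard semantics (illustrative only):
#
#   torch.round(torch.tensor([0.5, 1.5, 2.5]))        # tensor([0., 2., 2.])
#   torch.round(torch.tensor([3.14159]), decimals=2)  # tensor([3.1400])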
- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
- func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
  tags: nondeterministic_seeded
  device_check: NoCheck # TensorIterator
- func: relu(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: relu
    MPS: relu_mps
    MkldnnCPU: mkldnn_relu
    QuantizedCPU: relu_quantized_cpu
    QuantizedCUDA: relu_quantized_cuda
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu
    SparseCPU, SparseCUDA: relu_sparse
    SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr
  tags: [core, pointwise]
- func: relu_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: relu_
    MPS: relu_mps_
    MkldnnCPU: mkldnn_relu_
    QuantizedCPU: relu_quantized_cpu_
    QuantizedCUDA: relu_quantized_cuda_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu_
    SparseCPU, SparseCUDA: relu_sparse_
    SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr_
  autogen: relu.out
  tags: pointwise
- func: relu6(Tensor self) -> Tensor
  python_module: nn
- func: relu6_(Tensor(a!) self) -> Tensor(a!)
  python_module: nn
- func: prelu(Tensor self, Tensor weight) -> Tensor
  variants: function, method
  autogen: prelu.out
- func: _prelu_kernel(Tensor self, Tensor weight) -> Tensor
  dispatch:
    CPU, CUDA: _prelu_kernel
    QuantizedCPU: _prelu_kernel_quantized_cpu
    MkldnnCPU: mkldnn_prelu
    MPS: prelu_mps
- func: _prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
  dispatch:
    CPU, CUDA: _prelu_kernel_backward
    MkldnnCPU: mkldnn_prelu_backward
    MPS: prelu_backward_mps
- func: gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU: gelu_out_cpu
    CUDA: gelu_out_cuda
    MPS: gelu_out_mps
- func: gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
  structured_delegate: gelu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu_
- func: gelu(Tensor self, *, str approximate='none') -> Tensor
  structured_delegate: gelu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_gelu
    QuantizedCPU: gelu_quantized_cpu
    QuantizedCUDA: gelu_quantized_cuda
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu
  tags: [core, pointwise]
- func: gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU: gelu_backward_out_cpu
    CUDA: gelu_backward_out_cuda
    MPS: gelu_backward_out_mps
- func: gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
  structured_delegate: gelu_backward.grad_input
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_gelu_backward
    NestedTensorCPU, NestedTensorCUDA: gelu_backwards_nested
  tags: pointwise
- func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
  variants: function
  python_module: nn
  device_check: NoCheck
  device_guard: False
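# Example: the `approximate` kwarg selects between the exact erf-based GELU
# and the faster tanh approximation. A minimal Python sketch, assuming the
# standard torch.nn.functional API (illustrative only):
#
#   import torch.nn.functional as F
#   y_exact = F.gelu(x)                      # approximate='none' (erf)
#   y_tanh  = F.gelu(x, approximate='tanh')  # tanh approximation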
- func: hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: hardshrink_out
- func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
  structured_delegate: hardshrink.out
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: hardshrink_backward_out
- func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
  structured_delegate: hardshrink_backward.grad_input
  variants: function, method
- func: rsqrt(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: rsqrt.out
  variants: function, method
  tags: [core, pointwise]
- func: rsqrt_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: rsqrt.out
  variants: function, method
  tags: pointwise
- func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: rsqrt_out
    MPS: rsqrt_out_mps
  tags: pointwise
- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: select_symint
    SparseCsrCPU, SparseCsrCUDA: select_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: select_nested
  tags: core
- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutogradNonFunctional: select_backward_symint
  autogen: select_backward.out
- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_select_backward_symint
- func: selu(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
- func: selu_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: celu(Tensor self, Scalar alpha=1.0) -> Tensor
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: celu
- func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: celu_
  autogen: celu.out
- func: silu(Tensor self) -> Tensor
  structured_delegate: silu.out
  python_module: nn
- func: silu_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: silu.out
  python_module: nn
- func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: silu_out
    MPS: silu_out_mps
- func: silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: silu_backward_out
    MPS: silu_backward_out_mps
- func: silu_backward(Tensor grad_output, Tensor self) -> Tensor
  structured_delegate: silu_backward.grad_input
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: math_silu_backward
- func: mish(Tensor self) -> Tensor
  structured_delegate: mish.out
  python_module: nn
- func: mish_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: mish.out
  python_module: nn
- func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: mish_out
- func: mish_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: mish_backward
    CompositeImplicitAutograd: math_mish_backward
- func: sigmoid(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sigmoid.out
  variants: function, method
  dispatch:
    QuantizedCPU: sigmoid_quantized_cpu
    MkldnnCPU: mkldnn_sigmoid
  tags: [core, pointwise]
- func: sigmoid_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sigmoid.out
  variants: function, method
  dispatch:
    MkldnnCPU: mkldnn_sigmoid_
  tags: pointwise
- func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sigmoid_out
    MPS: sigmoid_out_mps
  tags: pointwise
- func: logit(Tensor self, float? eps=None) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: logit
  tags: pointwise
- func: logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
  variants: function, method
  dispatch:
    CPU, CUDA: logit_
  tags: pointwise
- func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: logit_out
  tags: pointwise
- func: sin(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sin.out
  variants: function, method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr
    SparseCPU, SparseCUDA: sin_sparse
  tags: [core, pointwise]
- func: sin_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sin.out
  variants: function, method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_
    SparseCPU, SparseCUDA: sin_sparse_
  tags: pointwise
- func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sin_out
    MPS: sin_out_mps
    SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_out
    SparseCPU, SparseCUDA: sin_sparse_out
  tags: pointwise
- func: sinc(Tensor self) -> Tensor
  structured_delegate: sinc.out
  variants: function, method
  tags: pointwise
- func: sinc_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: sinc.out
  variants: function, method
  tags: pointwise
- func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sinc_out
  tags: pointwise
- func: sinh(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sinh.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sinh_sparse
    SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr
  tags: [core, pointwise]
- func: sinh_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sinh.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sinh_sparse_
    SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_
  tags: pointwise
- func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sinh_out
    MPS: sinh_out_mps
    SparseCPU, SparseCUDA: sinh_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_out
  tags: pointwise
# Returns a copy of this `Variable` that is detached from its autograd graph.
# This method is OK to call if the `Variable` is a view.
#
# NOTE: Previously, changing the tensor metadata (e.g. sizes / strides /
# storage / storage_offset) of a tensor created from `detach()` would also
# update that metadata in the original tensor. The new behavior is that such
# metadata changes to the detached tensor no longer update the original
# tensor; in the `detach()` function we set `allow_tensor_metadata_change_`
# to false to make such changes explicitly illegal, preventing users from
# changing metadata of the detached tensor and expecting the original tensor
# to also be updated.
- func: detach(Tensor(a) self) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: detach
    NestedTensorCPU, NestedTensorCUDA: detach
# Like `detach()`, but modifies this `Variable` in-place. This method may
# only be called on non-view `Variable`s. You can use `is_view()` to check
# this. If this `Variable` is a view, it throws a `std::runtime_error`.
- func: detach_(Tensor(a!) self) -> Tensor(a!)
  variants: method
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: detach_
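# Example: detach returns a view that shares storage but is cut off from the
# autograd graph, so gradients stop flowing through it. A minimal Python
# sketch, assuming standard autograd semantics (illustrative only):
#
#   x = torch.ones(3, requires_grad=True)
#   y = (x * 2).detach()   # shares data with the intermediate; requires_grad=False
#   y[0] = 7               # in-place edits are visible through the shared storage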
- func: size.int(Tensor self, int dim) -> int
  variants: function
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True
- func: size.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: slice
  tags: core
# NOTE: The implementation of split_with_sizes bypasses the dispatcher to call this; undo
# that if adding specific implementations here!
- func: slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: slice_backward
  autogen: slice_backward.out
- func: slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: slice_scatter
  autogen: slice_scatter.out
  tags: core
- func: select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: select_scatter_symint
  autogen: select_scatter.out
- func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: diagonal_scatter
  autogen: diagonal_scatter.out
- func: as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: as_strided_scatter_symint
  autogen: as_strided_scatter.out
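# Example: the *_scatter ops are functional counterparts of writing into a
# view; they return a copy of `self` with `src` embedded where the matching
# view would select. A minimal Python sketch, assuming standard semantics
# (illustrative only):
#
#   x = torch.zeros(2, 4)
#   y = torch.slice_scatter(x, torch.ones(2, 2), dim=1, start=1, end=3)
#   # same result expressed as an in-place write into a slice view:
#   x[:, 1:3] = 1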
- func: smm(Tensor self, Tensor mat2) -> Tensor
  variants: function, method
# softmax allows a positional dtype, unlike most operators, because making it
# kw-only would be BC-breaking when loading jit models.
- func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  variants: function, method
- func: softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CompositeExplicitAutograd: softmax_out
- func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  structured_delegate: _softmax.out
  dispatch:
    MkldnnCPU: mkldnn_softmax
    NestedTensorCPU, NestedTensorCUDA: softmax_nested
  tags: core
- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: softmax_cpu_out
    CUDA: softmax_cuda_out
    MPS: softmax_mps_out
- func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
  structured_delegate: _softmax_backward_data.out
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: nested_softmax_backward
- func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: softmax_backward_cpu_out
    CUDA: softmax_backward_cuda_out
    MPS: softmax_backward_mps_out
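# Example: softmax normalizes along a single dimension so each slice sums to
# 1; the optional dtype upcasts before the exp for numerical safety. A
# minimal Python sketch, assuming the standard torch API (illustrative only):
#
#   x = torch.randn(2, 3)
#   p = torch.softmax(x, dim=-1)                          # rows sum to 1
#   p32 = torch.softmax(x.half(), dim=-1, dtype=torch.float32)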
- func: unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: unsafe_split
  autogen: unsafe_split.Tensor_out
- func: split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: split
- func: split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: split_symint
- func: unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: unsafe_split_with_sizes
  autogen: unsafe_split_with_sizes.out
- func: split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: split_with_sizes
- func: hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
  variants: function, method
- func: hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
  variants: function, method
- func: vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
  variants: function, method
- func: vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
  variants: function, method
- func: dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
  variants: function, method
- func: dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
  variants: function, method
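# Example: split takes a chunk size (the last chunk may be smaller), while
# split_with_sizes takes explicit per-chunk sizes that must sum to the
# dimension length; both return views. A minimal Python sketch, assuming the
# standard torch API (illustrative only):
#
#   x = torch.arange(10)
#   a, b, c, d = torch.split(x, 3)             # sizes 3, 3, 3, 1
#   p, q = torch.split_with_sizes(x, [4, 6])   # sizes 4, 6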
- func: squeeze(Tensor(a) self) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: squeeze
    QuantizedCPU, QuantizedCUDA: squeeze_quantized
    NestedTensorCPU, NestedTensorCUDA: squeeze_nested
- func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: squeeze
    QuantizedCPU, QuantizedCUDA: squeeze_quantized
    NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
  tags: core
- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: squeeze
    QuantizedCPU, QuantizedCUDA: squeeze_quantized
    NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
  tags: core
- func: squeeze_(Tensor(a!) self) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: squeeze_
- func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: squeeze_
- func: squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: squeeze_
- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: function, method
- func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: _sspaddmm_out_only_sparse
    CUDA: _sspaddmm_out_only_sparse_cuda
    SparseCPU: _sspaddmm_out_cpu
    SparseCUDA: _sspaddmm_out_cuda
- func: stack(Tensor[] tensors, int dim=0) -> Tensor
  dispatch:
    CompositeExplicitAutograd: stack
- func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: stack_out
- func: _stack(Tensor[] tensors, int dim=0) -> Tensor
  dispatch: # match the backends supported by _cat
    CPU: _stack_cpu
    CompositeExplicitAutograd: _stack
- func: _stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch: # match the backends supported by _cat_out
    CPU: _stack_out_cpu
    CompositeExplicitAutograd: _stack_out
- func: hstack(Tensor[] tensors) -> Tensor
- func: hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: vstack(Tensor[] tensors) -> Tensor
- func: vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: dstack(Tensor[] tensors) -> Tensor
- func: dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
# Overload without center & pad mode, needed for forward-compatibility
- func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
  variants: function, method
  cpp_no_default_args: ['hop_length', 'win_length', 'window', 'normalized']
- func: stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
  variants: function, method
- func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
  variants: function, method
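# Example: a round trip through the short-time Fourier transform. A minimal
# Python sketch, assuming the standard torch API (illustrative only; window
# and length choices here are arbitrary):
#
#   x = torch.randn(4000)
#   w = torch.hann_window(400)
#   S = torch.stft(x, n_fft=400, window=w, return_complex=True)
#   y = torch.istft(S, n_fft=400, window=w, length=x.numel())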
- func: stride.int(Tensor self, int dim) -> int
  variants: function
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True
- func: stride.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: sum
    SparseCPU, SparseCUDA: sum_coo
    SparseCsrCPU, SparseCsrCUDA: sum_csr
  autogen: sum.out
- func: sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: sum.IntList_out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    NestedTensorCPU: NestedTensor_sum_dim_CPU
    SparseCPU, SparseCUDA: sum_sparse_coo
  tags: core
- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: sum_out
    MPS: sum_out_mps
- func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
# TODO: this function will be replaced once nested expand semantics have been settled on
- func: _nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
  dispatch:
    NestedTensorCPU: _nested_sum_backward_cpu
- func: nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: nansum
    MPS: nansum_mps
- func: nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: nansum_out
    MPS: nansum_out_mps
- func: sum_to_size(Tensor self, int[] size) -> Tensor
  variants: method
  device_check: NoCheck
  device_guard: False
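# Example: the dim/keepdim reduction pattern above is shared by most
# reductions; nansum is sum that treats NaN as zero. A minimal Python sketch,
# assuming the standard torch API (illustrative only):
#
#   x = torch.tensor([[1., 2.], [3., float('nan')]])
#   x.sum(dim=1)                 # tensor([3., nan])
#   x.sum(dim=1, keepdim=True)   # shape (2, 1)
#   x.nansum(dim=1)              # tensor([3., 3.])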
- func: sqrt(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sqrt.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sqrt_sparse
    SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr
  tags: [core, pointwise]
- func: sqrt_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sqrt.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sqrt_sparse_
    SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_
  tags: pointwise
- func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sqrt_out
    MPS: sqrt_out_mps
    SparseCPU, SparseCUDA: sqrt_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_out
  tags: pointwise
- func: square(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: pointwise
- func: square_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: pointwise
- func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise
- func: std(Tensor self, bool unbiased=True) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: std
    MPS: std_mps
    QuantizedCPU: std_quantized_cpu
- func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA: std_mean
  autogen: std_mean.correction_out
- func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
- func: std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ["unbiased"]
- func: std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: std_out
    QuantizedCPU: std_out_quantized_cpu
- func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ["unbiased"]
- func: std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
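# Example: the correction overloads generalize the unbiased flag;
# correction=1 is Bessel's correction (the unbiased=True default) and
# correction=0 gives the population statistic. A minimal Python sketch,
# assuming the standard torch API (illustrative only):
#
#   x = torch.randn(100)
#   x.std()               # same as x.std(correction=1), divides by N-1
#   x.std(correction=0)   # divides by N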
- func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: prod
    MPS: prod_mps
  autogen: prod.out
- func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: prod.int_out
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: prod_out
    MPS: prod_out_mps
- func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: t(Tensor(a) self) -> Tensor(a)
  device_check: NoCheck
  device_guard: False
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: t
- func: t_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck
  device_guard: False
  variants: method
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: t_
- func: tan(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: tan.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: tan_sparse
    SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr
  tags: pointwise
- func: tan_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: tan.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: tan_sparse_
    SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_
  tags: pointwise
- func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: tan_out
    MPS: tan_out_mps
    SparseCPU, SparseCUDA: tan_sparse_out
    SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_out
  tags: pointwise
- func: tanh(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: tanh.out
  variants: function, method
  dispatch:
    QuantizedCPU: tanh_quantized_cpu
    MkldnnCPU: mkldnn_tanh
    SparseCPU, SparseCUDA: tanh_sparse
    SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh
  tags: [core, pointwise]
- func: tanh_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: tanh.out
  variants: function, method
  dispatch:
    MkldnnCPU: mkldnn_tanh_
    SparseCPU, SparseCUDA: tanh_sparse_
    SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh_
  tags: pointwise
- func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: tanh_out
    MPS: tanh_out_mps
    SparseCPU, SparseCUDA: tanh_sparse_out
    SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_out
  tags: pointwise
- func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
  variants: function
- func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU, CUDA: tensordot_out
# TODO: namespace threshold in 'nn'
- func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  structured_delegate: threshold.out
  dispatch:
    QuantizedCPU: threshold_quantized_cpu
- func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  structured_delegate: threshold.out
- func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: threshold_out
    MPS: threshold_out_mps
- func: threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: threshold_backward_out
    MPS: threshold_backward_out_mps
    SparseCPU, SparseCUDA: threshold_backward_sparse_out
    SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed_out
- func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
  variants: function
  structured_delegate: threshold_backward.grad_input
  dispatch:
    MkldnnCPU: mkldnn_relu_backward
    SparseCPU, SparseCUDA: threshold_backward_sparse
    SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed
    NestedTensorCPU, NestedTensorCUDA: threshold_backwards_nested
  tags: pointwise
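# Example: tensordot contracts the listed dimension pairs, so a plain matrix
# multiply is a contraction of dim 1 of self with dim 0 of other. A minimal
# Python sketch, assuming the standard torch API (illustrative only):
#
#   a = torch.randn(3, 4)
#   b = torch.randn(4, 5)
#   c = torch.tensordot(a, b, dims=([1], [0]))   # same as a @ b, shape (3, 5)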
- func: tile(Tensor self, int[] dims) -> Tensor
  variants: function, method
- func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: transpose
    NestedTensorCPU, NestedTensorCUDA: transpose_nested
- func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_transpose
- func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: transpose_
- func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_transpose_
  autogen: _mkldnn_transpose.out
- func: one_hot(Tensor self, int num_classes=-1) -> Tensor
  python_module: nn
  variants: function
  tags: dynamic_output_shape
- func: flip(Tensor self, int[] dims) -> Tensor
  variants: function, method
  dispatch:
    CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip
    MPS: flip_mps
  autogen: flip.out
  tags: core
- func: fliplr(Tensor self) -> Tensor
  variants: function, method
- func: flipud(Tensor self) -> Tensor
  variants: function, method
- func: roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
  variants: function, method
  dispatch:
    CPU: roll_cpu
    CUDA: roll_cuda
  autogen: roll.out
# The default int[] value [0,1] must not contain a space after the comma,
# since the codegen parser uses ', ' to split args.
- func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: rot90
  autogen: rot90.out
- func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
- func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
# Fused implementation detail for transformers. Adds in-projection bias to QKV and divides Q by sqrt(D/num_heads).
- func: _transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU, NestedTensorCPU: transform_bias_rescale_qkv_cpu
    CUDA, NestedTensorCUDA: transform_bias_rescale_qkv_cuda
  autogen: _transform_bias_rescale_qkv.out
- func: _nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
  dispatch:
    CPU, CUDA: NestedTensor_nested_tensor_from_mask
  autogen: _nested_tensor_from_mask.out
- func: _nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
  dispatch:
    CPU, CUDA: NestedTensor_nested_tensor_from_mask_left_aligned
- func: _nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
  device_check: NoCheck # cpu_nested_shape_example will always be on CPU
  dispatch:
    CPU: nested_from_padded_generic
    CUDA: nested_from_padded_cuda
  autogen: _nested_from_padded.out
# These private functions are temporary. They will be updated/deleted when nested tensors switch to using SymInts for their metadata representation
- func: _nested_tensor_size(Tensor self) -> Tensor
  variants: method
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_tensor_size
  autogen: _nested_tensor_size.out
- func: _nested_tensor_strides(Tensor self) -> Tensor
  variants: method
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_tensor_strides
  autogen: _nested_tensor_strides.out
- func: _nested_tensor_offsets(Tensor self) -> int[]
  variants: method
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_tensor_offsets
# _nested_from_padded is not usable from Python, so
# _nested_from_padded_and_nested_example is available for testing.
- func: _nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_from_padded_and_nested_example
  autogen: _nested_from_padded_and_nested_example.out
# This function's input argument types are temporary. When nested tensors
# switch to using SymInts for their metadata representation, this will need
# to be updated.
- func: _nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
  variants: function
  device_check: NoCheck
  dispatch:
    CPU, CUDA: _nested_view_from_buffer
- func: _nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
  variants: function
  device_check: NoCheck
  tags: view_copy
  dispatch:
    CompositeExplicitAutogradNonFunctional: _nested_view_from_buffer_copy
  autogen: _nested_view_from_buffer_copy.out
- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
  dispatch:
    # calls unsqueeze
    CompositeExplicitAutogradNonFunctional: _trilinear
  autogen: _trilinear.out
- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
- func: trunc(Tensor self) -> Tensor
  structured_delegate: trunc.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: trunc_sparse
    SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr
  tags: pointwise
- func: trunc_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: trunc.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: trunc_sparse_
    SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_
  tags: pointwise
- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: trunc_out
    MPS: trunc_out_mps
    SparseCPU, SparseCUDA: trunc_sparse_out
    SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_out
  tags: pointwise
# Alias for trunc
- func: fix(Tensor self) -> Tensor
  variants: function, method
- func: fix_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: type_as(Tensor self, Tensor other) -> Tensor
  variants: method
- func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
  variants: function
- func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: _unique_cpu
    CUDA: _unique_cuda
  autogen: _unique.out
- func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: unique_dim_cpu
    CUDA: unique_dim_cuda
  tags: dynamic_output_shape
  autogen: unique_dim.out
- func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: unique_consecutive_cpu
    CUDA: unique_consecutive_cuda
    MPS: unique_consecutive_mps
  tags: dynamic_output_shape
  autogen: unique_consecutive.out
- func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: unique_dim_consecutive_cpu
    CUDA: unique_dim_consecutive_cuda
    MPS: unique_dim_consecutive_mps
  tags: dynamic_output_shape
  autogen: unique_dim_consecutive.out
# _unique and _unique_dim are fragile; modifying them can easily cause internal breakage.
# The operator below is a temporary hack for adding return_counts support.
# Please don't rely on these two operators; they will be removed soon.
- func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: _unique2_cpu
    CUDA: _unique2_cuda
    MPS: _unique2_mps
  tags: dynamic_output_shape
  autogen: _unique2.out
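# Example: the public entry points are torch.unique and
# torch.unique_consecutive; the output size depends on the data, hence the
# dynamic_output_shape tags. A minimal Python sketch, assuming the standard
# torch API (illustrative only):
#
#   x = torch.tensor([1, 3, 2, 3, 1])
#   vals, counts = torch.unique(x, return_counts=True)
#   # vals = tensor([1, 2, 3]), counts = tensor([2, 1, 2])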
- func: _unsafe_view(Tensor self, SymInt[] size) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _unsafe_view
  autogen: _unsafe_view.out
- func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: unsqueeze
    SparseCPU, SparseCUDA: unsqueeze_sparse
    QuantizedCPU, QuantizedCUDA: unsqueeze_quantized
    NestedTensorCPU, NestedTensorCUDA: unsqueeze_nested
  tags: core
- func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: unsqueeze_
- func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
- func: var(Tensor self, bool unbiased=True) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: core
  cpp_no_default_args: ["unbiased"]
- func: var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: var
    MPS: var_mps
- func: var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ["unbiased"]
- func: var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: var_out
- func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ["unbiased"]
- func: var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA: var_mean
  autogen: var_mean.correction_out
- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
- func: view_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
- func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: where
    MPS: where_mps
  tags: [core, pointwise]
- func: where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: where_self_out
    MPS: where_self_out_mps
- func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
  variants: function
- func: where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
  variants: function, method
- func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
  variants: function
- func: where(Tensor condition) -> Tensor[]
  device_check: NoCheck # TensorIterator
  variants: function
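# Example: where.self selects elementwise between two tensors (with
# broadcasting), while the single-argument overload behaves like
# torch.nonzero(condition, as_tuple=True). A minimal Python sketch, assuming
# the standard torch API (illustrative only):
#
#   x = torch.tensor([-1.0, 2.0, -3.0])
#   torch.where(x > 0, x, torch.zeros_like(x))   # tensor([0., 2., 0.])
#   torch.where(x > 0)                           # (tensor([1]),)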
- func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
  variants: function
# VariableType::_weight_norm does not want to be given a gap in the autograd graph,
# so we don't define "dispatch" variants for it.
- func: _weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
  variants: function
- func: _weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: weight_norm_cpu
    CUDA: weight_norm_cuda
  autogen: _weight_norm_interface.out
- func: _weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: weight_norm_backward_cpu
    CUDA: weight_norm_backward_cuda
  autogen: _weight_norm_interface_backward.out
- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
  variants: function
- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: zeros
  autogen: zeros.names_out
- func: _efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: _efficientzerotensor
    CUDA: _efficientzerotensor_cuda
    Meta: _efficientzerotensor_meta
  autogen: _efficientzerotensor.out
- func: zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: zeros_symint
- func: zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: zeros_out
    SparseCPU, SparseCUDA, SparseMeta: zeros_sparse_out
- func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: zeros_like
  autogen: zeros_like.out
- - func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor
- variants: function
- dispatch:
- CPU: _standard_gamma_grad_cpu
- CUDA: _standard_gamma_grad_cuda
- autogen: _standard_gamma_grad.out
- - func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor
- variants: function
- dispatch:
- CPU: _s_gamma_cpu
- CUDA: _s_gamma_cuda
- tags: nondeterministic_seeded
- autogen: _standard_gamma.out
- - func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
- dispatch:
- CPU: _dirichlet_grad_cpu
- CUDA: _dirichlet_grad_cuda
- autogen: _dirichlet_grad.out
- - func: _sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
- tags: nondeterministic_seeded
- variants: function
- dispatch:
- CPU: _s_dirichlet_cpu
- CUDA: _s_dirichlet_cuda
- autogen: _sample_dirichlet.out
- - func: poisson(Tensor self, Generator? generator=None) -> Tensor
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU: _s_poisson_cpu
- CUDA: _s_poisson_cuda
- tags: nondeterministic_seeded
- autogen: poisson.out
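- # Example (not part of the schema): a short sketch of the sampling entry
- # points above; `poisson` is public, while the gamma/dirichlet kernels are
- # normally reached through `torch.distributions`.
- #
- #   import torch
- #   rates = torch.tensor([1.0, 4.0, 10.0])
- #   torch.poisson(rates)                                     # one draw per rate
- #   torch.distributions.Dirichlet(torch.ones(3)).sample()    # uses _sample_dirichlet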
- - func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU: _s_binomial_cpu
- CUDA: _s_binomial_cuda
- tags: nondeterministic_seeded
- autogen: binomial.out
- # When more variants get ported to native, this dispatch will get more
- # complicated
- - func: native_norm(Tensor self, Scalar p=2) -> Tensor
- dispatch:
- SparseCPU, SparseCUDA: norm_sparse
- autogen: native_norm.out
- - func: native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
- dispatch:
- SparseCPU, SparseCUDA: norm_sparse
- autogen: native_norm.ScalarOpt_dim_dtype_out
- # TODO: reduce signatures down to one when optional args are available
- - func: _sparse_sum(Tensor self) -> Tensor
- - func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
- - func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
- dispatch:
- CompositeExplicitAutograd: _sparse_sum
- autogen: _sparse_sum.dim_out
- - func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
- - func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
- dispatch:
- SparseCPU: _sparse_sum_backward_cpu
- SparseCUDA: _sparse_sum_backward_cuda
- autogen: _sparse_sum_backward.out
- - func: _sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- dispatch:
- SparseCsrCPU: _sparse_csr_sum_cpu
- SparseCsrCUDA: _sparse_csr_sum_cuda
- autogen: _sparse_csr_sum.dim_dtype_out
- - func: _sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- dispatch:
- SparseCsrCPU: _sparse_csr_prod_cpu
- SparseCsrCUDA: _sparse_csr_prod_cuda
- autogen: _sparse_csr_prod.dim_dtype_out
- - func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
- python_module: sparse
- variants: function
- - func: _sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
- python_module: sparse
- variants: function
- - func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
- python_module: sparse
- dispatch:
- SparseCPU: softmax_sparse_cpu
- SparseCUDA: softmax_sparse_cuda
- autogen: _sparse_softmax.out
- - func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
- dispatch:
- SparseCPU: softmax_backward_sparse_cpu
- SparseCUDA: softmax_backward_sparse_cuda
- autogen: _sparse_softmax_backward_data.out
- - func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
- python_module: sparse
- variants: function
- - func: _sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
- python_module: sparse
- variants: function
- - func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
- python_module: sparse
- dispatch:
- SparseCPU: log_softmax_sparse_cpu
- SparseCUDA: log_softmax_sparse_cuda
- autogen: _sparse_log_softmax.out
- - func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
- dispatch:
- SparseCPU: log_softmax_backward_sparse_cpu
- SparseCUDA: log_softmax_backward_sparse_cuda
- autogen: _sparse_log_softmax_backward_data.out
- - func: _spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
- python_module: sparse
- dispatch:
- CPU: spdiags
- autogen: _spdiags.out
- - func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: norm
- autogen: norm.ScalarOpt_dtype_out
- - func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: norm
- autogen: norm.Scalar_out
- - func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
- structured_delegate: norm.dtype_out
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: sparse_dtype_norm
- - func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
- structured_delegate: norm.out
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: sparse_norm
- - func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
- structured: True
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: norm_dtype_out
- MPS: norm_dtype_out_mps
- - func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: norm_out
- MPS: norm_out_mps
- # These four redispatch in their implementation, so OK to be CompositeImplicitAutograd
- - func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- - func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- - func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- - func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
- variants: method, function
- dispatch:
- CompositeExplicitAutograd: frexp
- tags: pointwise
- - func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
- dispatch:
- CPU, CUDA: frexp_out
- tags: pointwise
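- # Example (not part of the schema): `frexp` decomposes each element into a
- # mantissa in [0.5, 1) and an integer exponent such that
- # self == mantissa * 2**exponent; a minimal sketch:
- #
- #   import torch
- #   m, e = torch.frexp(torch.tensor([8.0, 0.75]))
- #   # m = tensor([0.5000, 0.7500]), e = tensor([4, 0], dtype=torch.int32)
- #   torch.allclose(m * 2.0**e, torch.tensor([8.0, 0.75]))   # True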
- # Deprecated (v.1.12)
- - func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
- variants: function
- # Deprecated (v.1.12)
- - func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- # Deprecated (v.1.12)
- - func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
- variants: function
- # Deprecated (v.1.12)
- - func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- # Deprecated (v.1.12)
- - func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
- variants: function
- # Deprecated (v.1.12)
- - func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- variants: function
- - func: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: clone
- SparseCPU, SparseCUDA: clone_sparse
- SparseCsrCPU, SparseCsrCUDA: clone_sparse_compressed
- MkldnnCPU: mkldnn_clone
- QuantizedCPU, QuantizedCUDA: quantized_clone
- NestedTensorCPU, NestedTensorCUDA: clone_nested
- autogen: clone.out
- tags: core
- - func: positive(Tensor(a) self) -> Tensor(a)
- variants: function, method
- tags: pointwise
- - func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
- use_const_ref_for_mutable_tensors: True
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: resize_as_
- autogen: resize_as, resize_as.out
- - func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
- use_const_ref_for_mutable_tensors: True
- variants: function, method
- dispatch:
- SparseCPU, SparseCUDA: resize_as_sparse_
- SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_compressed_
- autogen: resize_as_sparse, resize_as_sparse.out
- - func: zero_(Tensor(a!) self) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method, function
- dispatch:
- CPU, CUDA: zero_
- MPS: zero_mps_
- Meta: zero_meta_
- SparseCPU, SparseCUDA, SparseMeta: zero_sparse_
- SparseCsrCPU, SparseCsrCUDA: zero_sparse_csr_
- MkldnnCPU: mkldnn_zero_
- autogen: zero, zero.out
- - func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: sub_out
- MPS: sub_out_mps
- SparseCPU, SparseCUDA: sub_out_sparse
- tags: pointwise
- - func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: sub.out
- dispatch:
- SparseCPU, SparseCUDA: sub_sparse
- ZeroTensor: sub_zerotensor
- tags: [core, pointwise]
- - func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- structured_delegate: sub.out
- dispatch:
- SparseCPU, SparseCUDA: sub_sparse_
- tags: pointwise
- # For C++ only, until we have conversion from C++ numbers to Tensor
- - func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: sub
- tags: [core, pointwise]
- - func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CompositeExplicitAutograd: sub_
- autogen: sub.Scalar_out
- tags: pointwise
- # subtract, alias for sub
- - func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- - func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- variants: function, method
- - func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
- variants: method
- # For C++ only, until we have conversion from C++ numbers to Tensor
- - func: subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- variants: function, method
- - func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
- variants: method
- - func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CPU, CUDA: rsub
- autogen: rsub.Tensor_out
- - func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
- structured: True
- structured_inherits: TensorIteratorBase
- device_check: NoCheck # TensorIterator
- dispatch:
- CPU, CUDA: heaviside_out
- tags: pointwise
- - func: heaviside(Tensor self, Tensor values) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- structured_delegate: heaviside.out
- tags: pointwise
- - func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- structured_delegate: heaviside.out
- # For C++ only, until we have conversion from C++ numbers to Tensor
- - func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
- dispatch:
- CompositeExplicitAutograd: rsub
- autogen: rsub.Scalar_out
- tags: pointwise
- # Functionally the same as addmm, but we give it a different derivative formula
- # that doesn't propagate gradients to non-present entries on sparse.
- - func: _sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- python_module: sparse
- dispatch:
- CompositeExplicitAutograd: _sparse_addmm
- autogen: _sparse_addmm.out
- - func: sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- python_module: sparse
- dispatch:
- SparseCsrCUDA: sparse_sampled_addmm_out_sparse_csr_cuda
- SparseCsrCPU: sparse_sampled_addmm_out_sparse_csr_cpu
- - func: sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- python_module: sparse
- dispatch:
- SparseCsrCUDA: sparse_sampled_addmm_sparse_csr_cuda
- SparseCsrCPU: sparse_sampled_addmm_sparse_csr_cpu
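- # Example (not part of the schema): `sampled_addmm` is exposed as
- # `torch.sparse.sampled_addmm`; it evaluates alpha * (mat1 @ mat2) only at
- # the sparsity pattern of the CSR input and adds beta * input. A sketch
- # with illustrative shapes:
- #
- #   import torch
- #   a = torch.eye(3).to_sparse_csr()         # pattern: the diagonal
- #   m1, m2 = torch.randn(3, 4), torch.randn(4, 3)
- #   torch.sparse.sampled_addmm(a, m1, m2)    # CSR result on the same pattern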
- - func: _sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
- python_module: sparse
- dispatch:
- SparseCsrCPU: _sparse_mm_reduce_impl_sparse_csr_cpu
- - func: _sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
- python_module: sparse
- dispatch:
- SparseCsrCPU: _sparse_mm_reduce_impl_backward_sparse_csr_cpu
- - func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU: addmm_out_cpu
- CUDA: addmm_out_cuda
- MPS: addmm_out_mps
- SparseCPU: addmm_out_sparse_dense_cpu
- SparseCUDA: addmm_out_sparse_dense_cuda
- SparseCsrCPU: addmm_out_sparse_compressed_cpu
- SparseCsrCUDA: addmm_out_sparse_compressed_cuda
- - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- structured_delegate: addmm.out
- variants: function, method
- dispatch:
- SparseCPU: addmm_sparse_dense_cpu
- SparseCUDA: addmm_sparse_dense_cuda
- SparseCsrCPU, SparseCsrCUDA: addmm_sparse_compressed_dense
- tags: core
- - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
- structured_delegate: addmm.out
- variants: method
- dispatch:
- # Warning! For whatever reason, the inplace sparse addmm is NON
- # broadcasting
- SparseCPU: s_addmm_sparse_dense_cpu_
- SparseCUDA: s_addmm_sparse_dense_cuda_
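- # Example (not part of the schema): `addmm` computes
- # beta * self + alpha * (mat1 @ mat2); a minimal sketch with illustrative shapes:
- #
- #   import torch
- #   bias = torch.zeros(2, 2)
- #   m1, m2 = torch.randn(2, 3), torch.randn(3, 2)
- #   torch.addmm(bias, m1, m2, beta=1, alpha=1)   # same as bias + m1 @ m2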
- - func: _addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
- structured: True
- dispatch:
- CPU: addmm_activation_out_cpu
- CUDA: addmm_activation_out_cuda
- - func: _addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
- structured_delegate: _addmm_activation.out
- variants: function, method
- # NOTE [ Sparse: autograd and API ]
- #
- #
- # Sparse Tensor Constructors
- # ~~~~~~~~~~~~~~~~~~~~~~~~~~
- #
- # The API entry points to sparse tensor construction should be
- # `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`. Depending on whether the
- # indices and values tensors are given, they eventually dispatch to either
- # `sparse_coo_tensor_with_dims` or `sparse_coo_tensor_with_dims_and_tensors`.
- #
- # The autograd support for the ctor is implemented on `sparse_coo_tensor_with_dims_and_tensors`.
- #
- # The API methods `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`
- # **must not** have specific type dispatches because otherwise codegen will
- # consider them as abstract methods (see Note [Abstract ATen methods]), dispatch
- # using **Tensor** type, and thus lose autograd tracking on the actual method
- # they dispatch to, e.g., `sparse_coo_tensor_with_dims_and_tensors`.
- #
- #
- # Sparse Methods API Design
- # ~~~~~~~~~~~~~~~~~~~~~~~~~
- #
- # Goals: 1. Flexible API for users to write custom sparse ops
- # 2. ctor and member accessor with autograd support
- #
- # To achieve 1, we need to provide a set of *dangerous* APIs (dangerous in the
- # sense that misusing them will break the sparse tensor invariants and may result in
- # unexpected behavior, e.g., crash). These methods are all prefixed with
- # underscore "_" to indicate that they should be used with care. We provide:
- #
- # + `_indices()`: returns the *raw* indices within the sparse tensor (not just
- # sharing storage). Any inplace operation will change the
- # actual indices, including t_, set_, as_strided_, resize_,
- # etc.
- # + `_values()`: returns the *raw* values within the sparse tensor. Similar
- # semantics as `_indices()`
- # + `_nnz()`: returns the number of non-zero entries. This will always be
- # determined by the shapes of indices and values.
- # + `_coalesced_(bool)`: inplace sets whether the tensor is coalesced, and
- # returns itself.
- #
- # These methods are very useful in writing new operations, e.g., a custom
- # autograd Function.
- #
- # We also provide other public *safe* APIs:
- # + `indices()`: returns a **view** of the indices tensor if the sparse tensor
- # is **coalesced**.
- # + `values()`: returns a **view** of the values tensor if the containing
- # sparse tensor is **coalesced**.
- # + `sparse_dim()`: number of sparse dimensions
- # + `dense_dim()`: number of dense dimensions
- # + `is_coalesced()`: whether the sparse tensor is coalesced
- #
- # `_indices()` and `_values()` should return the raw indices and values dense
- # tensors within a sparse tensor. They can be quite unsafe with inplace
- # operations like `t_()`, and expose uncoalesced indices and values. The public
- # recommended API is `indices()` and `values()`, both of which first check that
- # the tensor is coalesced and return views on those tensors.
- #
- #
- # Autograd Support
- # ~~~~~~~~~~~~~~~~
- #
- # Autograd is supported on `values()` and sparse tensor ctor with indices and
- # values tensors. E.g., `torch.sparse_coo_tensor(i, v).values().sum()` is
- # differentiable w.r.t. `v`.
- #
- # NB: The `values()` and `_values()` operators are special in that they are
- # layout-aware, i.e., the output depends not just on the data it represents, but
- # also on the input layout details (in this case, the `indices` tensor). See
- # NOTE [ as_strided Backward and layout-aware/agnostic autograd ] in Functions.cpp
- # for discussion on layout-aware vs layout-agnostic autograd. Since PyTorch ops
- # operate in the layout-agnostic mode, similar to `as_strided`, the backward of
- # these two operators needs to consider them in a layout-agnostic way:
- # + `values()`:
- # Input is coalesced.
- # We just pretend having `input.indices()` as an additional argument
- # `input_indices`, then forward is similar to
- # `input.to(kStrided).index_select(input_indices)` regardless of the layout.
- # Note that `values()` normally is layout-aware even if we constrain
- # ourselves to sparse inputs, since it may include all-zero value entries
- # as "present" entries.
- # + `_values()`:
- # Input may be uncoalesced.
- # It is not straightforward to construct a layout-agnostic version because
- # duplicate indices entries may exist and additional parameterization is
- # needed to distribute the value into different values entries. Furthermore,
- # this op is intended to provide ways to write custom sparse ops, rather
- # than being used in autograd graph, so it is marked as *non-differentiable*
- # in derivatives.yaml.
- #
- # Before reading the following, see NOTE [ Autograd Variable Views ] in
- # variable.h for details on views that are tracked by autograd, and views that
- # are not.
- #
- # Moreover, these methods return tensors that share storage with inputs, so we
- # mark these methods as view ops to support autograd history tracking.
- # The sparse tensor ctor output should technically be view of both input indices
- # and values tensors, but currently we only support setting as view of a single
- # Variable, so it is only view of the values tensor.
- # TODO: clone indices in sparse tensor ctor.
- #
- # For other methods that return outputs that share storage with inputs, i.e.,
- # `indices()` and `_indices()`, we mark their outputs as non-differentiable, so
- # the view relation is not tracked by autograd, but the version counter is still
- # shared. In other words, their outputs are non-differentiable views of the
- # sparse tensor.
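- #
- # Example (not part of the schema): a minimal sketch of the *safe* API and
- # the autograd behavior described above, with illustrative indices/values:
- #
- #   import torch
- #   i = torch.tensor([[0, 1, 1], [2, 0, 2]])
- #   v = torch.tensor([3., 4., 5.], requires_grad=True)
- #   s = torch.sparse_coo_tensor(i, v, (2, 3)).coalesce()
- #   s.indices(), s.values()        # views; require a coalesced tensor
- #   s.sparse_dim(), s.dense_dim()  # 2, 0
- #   s.values().sum().backward()    # differentiable w.r.t. v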
- # FIXME: it would be nicer if TensorOptions were optional-based; we are not adding
- # default arguments for the options since the defaults would never make sense.
- - func: sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- - func: _sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: _sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: _sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: _sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: _sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- dispatch:
- CompositeExplicitAutograd: sparse_coo_tensor
- autogen: sparse_coo_tensor.size_out
- - func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- - func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- dispatch:
- CompositeImplicitAutograd: _sparse_coo_tensor_unsafe_symint
- - func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()
- - func: _validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
- - func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
- - func: _validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
- - func: _validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
- - func: _validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
- - func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse
- autogen: _sparse_coo_tensor_with_dims.out
- - func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint
- autogen: _sparse_coo_tensor_with_dims_and_tensors.out
- - func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
- use_const_ref_for_mutable_tensors: True
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: sparse_resize_
- autogen: sparse_resize, sparse_resize.out
- - func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
- use_const_ref_for_mutable_tensors: True
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: sparse_resize_and_clear_
- autogen: sparse_resize_and_clear, sparse_resize_and_clear.out
- - func: sparse_mask(Tensor self, Tensor mask) -> Tensor
- variants: method
- dispatch:
- SparseCPU, SparseCUDA: sparse_mask
- SparseCsrCPU, SparseCsrCUDA: sparse_mask_sparse_csr
- autogen: sparse_mask.out
- - func: _to_cpu(Tensor[] tensors) -> Tensor[]
- variants: function
- - func: to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
- variants: method
- # Special case of to_dense with custom derivative
- - func: _to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
- variants: method
- dispatch:
- SparseCPU, SparseCUDA: sparse_to_dense
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_dense
- MkldnnCPU: mkldnn_to_dense
- autogen: _to_dense.out
- - func: to_dense_backward(Tensor grad, Tensor input) -> Tensor
- - func: sparse_dim(Tensor self) -> int
- variants: method
- dispatch:
- CPU, CUDA: sparse_dim_strided
- SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse
- SparseCsrCPU, SparseCsrCUDA: sparse_dim_sparse_csr
- device_check: NoCheck
- device_guard: False
- # legacy method
- - func: _dimI(Tensor self) -> int
- variants: method
- dispatch:
- SparseCPU, SparseCUDA: sparse_dim_sparse
- device_check: NoCheck
- device_guard: False
- - func: dense_dim(Tensor self) -> int
- variants: method
- dispatch:
- CPU, CUDA: dense_dim_strided
- SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
- SparseCsrCPU, SparseCsrCUDA: dense_dim_sparse_csr
- device_check: NoCheck
- device_guard: False
- # legacy method
- - func: _dimV(Tensor self) -> int
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
- device_check: NoCheck
- device_guard: False
- - func: _nnz(Tensor self) -> int
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: _nnz_sparse
- SparseCsrCPU, SparseCsrCUDA: _nnz_sparse_csr
- device_check: NoCheck
- device_guard: False
- # NOTE: [ coalesce autograd ]
- # coalesce returns self directly for already coalesced sparse tensors.
- # This means coalesce cannot have a derivative registered, otherwise it creates
- # circular references in the autograd graph (see gh-52874).
- # Instead, the derivative is registered on the slow-path "_coalesce"
- - func: coalesce(Tensor(a) self) -> Tensor(a)
- variants: method
- - func: _coalesce(Tensor self) -> Tensor
- dispatch:
- SparseCPU: _coalesce_sparse_cpu
- SparseCUDA: _coalesce_sparse_cuda
- autogen: _coalesce.out
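- # Example (not part of the schema): coalescing accumulates duplicate
- # indices; a minimal sketch:
- #
- #   import torch
- #   i = torch.tensor([[0, 0], [1, 1]])        # the same entry twice
- #   s = torch.sparse_coo_tensor(i, torch.tensor([1., 2.]), (2, 2))
- #   s.is_coalesced()           # False
- #   s.coalesce().values()      # tensor([3.]) -- duplicates summed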
- - func: is_coalesced(Tensor self) -> bool
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse
- CompositeExplicitAutograd: is_coalesced_default
- device_check: NoCheck
- device_guard: False
- - func: _indices(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: _indices_sparse
- device_check: NoCheck
- device_guard: False
- - func: _values(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: _values_sparse
- device_check: NoCheck
- device_guard: False
- # This method doesn't do any check but only directly sets the flag. So it can be
- # a bit unsafe. Similar to _indices and _values, this is useful for implementing
- # custom sparse operations in Python/C++ extension.
- - func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: _coalesced_sparse_
- device_check: NoCheck
- device_guard: False
- autogen: _coalesced, _coalesced.out
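- # Example (not part of the schema): a hedged sketch of how the *raw*
- # accessors above can back a custom sparse op; `scale_values` is a
- # hypothetical helper, not a PyTorch API.
- #
- #   import torch
- #   def scale_values(s: torch.Tensor, k: float) -> torch.Tensor:
- #       # reuse the raw indices/values; preserve the coalesced flag
- #       out = torch.sparse_coo_tensor(s._indices(), s._values() * k, s.shape)
- #       return out._coalesced_(s.is_coalesced())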
- - func: indices(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: indices_sparse
- CompositeExplicitAutograd: indices_default
- device_check: NoCheck
- device_guard: False
- - func: values(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCPU, SparseCUDA, SparseMeta: values_sparse
- SparseCsrCPU, SparseCsrCUDA: values_sparse_csr
- NestedTensorCPU, NestedTensorCUDA: values_nested
- CompositeExplicitAutograd: values_default
- device_check: NoCheck
- device_guard: False
- - func: crow_indices(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCsrCPU, SparseCsrCUDA: crow_indices_sparse_csr
- CompositeExplicitAutograd: crow_indices_default
- device_check: NoCheck
- device_guard: False
- - func: col_indices(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCsrCPU, SparseCsrCUDA: col_indices_sparse_csr
- CompositeExplicitAutograd: col_indices_default
- device_check: NoCheck
- device_guard: False
- - func: ccol_indices(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCsrCPU, SparseCsrCUDA: ccol_indices_sparse_csr
- CompositeExplicitAutograd: ccol_indices_default
- device_check: NoCheck
- device_guard: False
- - func: row_indices(Tensor(a) self) -> Tensor(a)
- variants: method
- dispatch:
- SparseCsrCPU, SparseCsrCUDA: row_indices_sparse_csr
- CompositeExplicitAutograd: row_indices_default
- device_check: NoCheck
- device_guard: False
- - func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- SparseCPU: hspmm_out_sparse_cpu
- SparseCUDA: hspmm_out_sparse_cuda
- - func: hspmm(Tensor mat1, Tensor mat2) -> Tensor
- dispatch:
- SparseCPU: hspmm_sparse_cpu
- SparseCUDA: hspmm_sparse_cuda
- - func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
- device_check: NoCheck # Allows copy into different device
- variants: function
- dispatch:
- SparseCPU, SparseCUDA: copy_sparse_
- autogen: copy_sparse_to_sparse, copy_sparse_to_sparse.out
- # Adding the AutogradNestedTensor kernel makes this function CompositeImplicit-like for nested tensors
- - func: unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: unbind
- CompositeImplicitAutogradNestedTensor: NestedTensor_unbind
- - func: unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
- variants: function, method
- - func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
- variants: method
- dispatch:
- CPU, CUDA: dense_to_sparse
- SparseCPU, SparseCUDA: sparse_coo_to_sparse
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
- autogen: to_sparse.sparse_dim_out
- - func: to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
- variants: method
- dispatch:
- CPU, CUDA: dense_to_sparse
- SparseCPU, SparseCUDA: sparse_coo_to_sparse
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
- autogen: to_sparse.out
- - func: to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
- variants: method
- dispatch:
- CPU, CUDA: dense_to_sparse_csr
- SparseCPU, SparseCUDA: coo_to_sparse_csr
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csr
- autogen: to_sparse_csr.out
- - func: to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
- variants: method
- dispatch:
- CPU, CUDA: dense_to_sparse_csc
- SparseCPU, SparseCUDA: coo_to_sparse_csc
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csc
- autogen: to_sparse_csc.out
- - func: to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
- variants: method
- dispatch:
- CPU, CUDA: dense_to_sparse_bsr
- SparseCPU, SparseCUDA: coo_to_sparse_bsr
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsr
- autogen: to_sparse_bsr.out
- - func: to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
- variants: method
- dispatch:
- CPU, CUDA: dense_to_sparse_bsc
- SparseCPU, SparseCUDA: coo_to_sparse_bsc
- SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsc
- autogen: to_sparse_bsc.out
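- # Example (not part of the schema): a minimal sketch of the layout
- # conversions above:
- #
- #   import torch
- #   d = torch.tensor([[0., 1.], [2., 0.]])
- #   d.to_sparse()                            # COO
- #   d.to_sparse_csr()                        # CSR
- #   d.to_sparse_csr().to_sparse_bsr((1, 1))  # BSR with 1x1 blocks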
- - func: to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
- variants: method
- dispatch:
- CPU: dense_to_mkldnn
- autogen: to_mkldnn.out
- - func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
- variants: function
- python_module: nn
- dispatch:
- MkldnnCPU: mkldnn_reorder_conv2d_weight
- autogen: mkldnn_reorder_conv2d_weight.out
- - func: mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
- variants: function
- python_module: nn
- dispatch:
- MkldnnCPU: mkldnn_reorder_conv3d_weight
- autogen: mkldnn_reorder_conv3d_weight.out
- - func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
- - func: quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: quantize_per_tensor_dynamic
- autogen: quantize_per_tensor_dynamic.out
- - func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: quantize_per_tensor
- autogen: quantize_per_tensor.out
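- # Example (not part of the schema): per-tensor affine quantization and a
- # round-trip back to float; scale/zero_point are illustrative.
- #
- #   import torch
- #   x = torch.tensor([-1.0, 0.0, 1.0])
- #   q = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
- #   q.int_repr()        # underlying uint8 storage
- #   torch.dequantize(q)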
- - func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: quantize_per_tensor_tensor_qparams
- autogen: quantize_per_tensor.tensor_qparams_out
- - func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
- variants: function
- dispatch:
- CPU: quantize_per_tensor_list_cpu
- autogen: quantize_per_tensor.tensors_out
- - func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: quantize_per_channel
- autogen: quantize_per_channel.out
- - func: dequantize.self(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- CPU, CUDA: dequantize_cpu_or_cuda
- QuantizedCPU, QuantizedCUDA: dequantize_quantized
- autogen: dequantize.self_out
- - func: dequantize.tensors(Tensor[] tensors) -> Tensor[]
- variants: function
- dispatch:
- QuantizedCPU: dequantize_tensors_quantized_cpu
- autogen: dequantize.tensors_out
- - func: q_scale(Tensor self) -> float
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: q_scale_quant
- - func: q_zero_point(Tensor self) -> int
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: q_zero_point_quant
- - func: q_per_channel_scales(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: q_per_channel_scales
- autogen: q_per_channel_scales.out
- - func: q_per_channel_zero_points(Tensor self) -> Tensor
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: q_per_channel_zero_points
- autogen: q_per_channel_zero_points.out
- - func: q_per_channel_axis(Tensor self) -> int
- variants: function, method
- dispatch:
- QuantizedCPU, QuantizedCUDA: q_per_channel_axis
- - func: int_repr(Tensor self) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- QuantizedCPU: int_repr_quantized_cpu
- QuantizedCUDA: int_repr_quantized_cuda
- autogen: int_repr.out
- - func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
- dispatch:
- CPU: make_per_tensor_quantized_tensor_cpu
- CUDA: make_per_tensor_quantized_tensor_cuda
- autogen: _make_per_tensor_quantized_tensor.out
- - func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
- dispatch:
- CPU: make_per_channel_quantized_tensor_cpu
- CUDA: make_per_channel_quantized_tensor_cuda
- autogen: _make_per_channel_quantized_tensor.out
- - func: qscheme(Tensor self) -> QScheme
- variants: method
- dispatch:
- QuantizedCPU, QuantizedCUDA: qscheme_quant
- - func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
- - func: fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
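- # Example (not part of the schema): fake quantization keeps the float dtype
- # but snaps values onto the quantization grid; quant_min/quant_max below are
- # the usual uint8 bounds.
- #
- #   import torch
- #   x = torch.tensor([0.03, 0.5])
- #   torch.fake_quantize_per_tensor_affine(x, scale=0.1, zero_point=0,
- #                                         quant_min=0, quant_max=255)
- #   # tensor([0.0000, 0.5000]) -- 0.03 rounds onto the 0.1 grid as 0.0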
- - func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
- variants: function
- dispatch:
- CPU, CUDA: fake_quantize_per_tensor_affine_cachemask
- autogen: fake_quantize_per_tensor_affine_cachemask.out
- - func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
- variants: function
- dispatch:
- CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams
- autogen: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out
- - func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
- variants: function
- - func: _fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: _fake_quantize_learnable_per_tensor_affine
- autogen: _fake_quantize_learnable_per_tensor_affine.out
- - func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
- variants: function
- dispatch:
- CPU, CUDA: _fake_quantize_learnable_per_tensor_affine_backward
- - func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function
- - func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
- variants: function
- dispatch:
- CPU, CUDA: fake_quantize_per_channel_affine_cachemask
- autogen: fake_quantize_per_channel_affine_cachemask.out
- - func: fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
- variants: function
- - func: _fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: _fake_quantize_learnable_per_channel_affine
- autogen: _fake_quantize_learnable_per_channel_affine.out
- - func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
- variants: function
- dispatch:
- CPU, CUDA: _fake_quantize_learnable_per_channel_affine_backward
- - func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
- variants: function
- - func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
- dispatch:
- CPU: fused_moving_avg_obs_fake_quant_cpu
- CUDA: fused_moving_avg_obs_fake_quant_cuda
- autogen: _fused_moving_avg_obs_fq_helper_functional, _fused_moving_avg_obs_fq_helper.out
- - func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
- variants: function
- - func: _saturate_weight_to_fp16(Tensor weight) -> Tensor
- variants: function
- - func: choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
- variants: function
- - func: _autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
- variants: method
- device_guard: False
- - func: _autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
- variants: method
- device_guard: False
- - func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeExplicitAutograd: _to_copy
- NestedTensorCPU, NestedTensorCUDA: _to_copy_nested
- autogen: _to_copy.out
- tags: core
- # to(Device) must not exist because all constructors of Device also work for
- # TensorOptions. Otherwise, an ambiguity error is thrown.
- # See NOTE [ TensorOptions Constructors ].
- - func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
- variants: method
- device_check: NoCheck
- device_guard: False
- - func: to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
- variants: method
- device_check: NoCheck
- device_guard: False
- - func: to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
- variants: method
- device_check: NoCheck
- device_guard: False
- - func: to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
- variants: method
- device_check: NoCheck
- device_guard: False
- - func: meshgrid(Tensor[] tensors) -> Tensor[]
- # TODO: Two weeks after this lands, combine these two overloads,
- # making "indexing" optional. These are temporarily distinct for
- # forward-compatibility reasons.
- - func: meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
- - func: cartesian_prod(Tensor[] tensors) -> Tensor
- variants: function
- - func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
- variants: function
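- # Example (not part of the schema): a minimal sketch of the grid helpers
- # above; note the explicit `indexing` argument mentioned in the TODO.
- #
- #   import torch
- #   x, y = torch.tensor([1, 2]), torch.tensor([3, 4, 5])
- #   gx, gy = torch.meshgrid(x, y, indexing='ij')   # both shaped (2, 3)
- #   torch.cartesian_prod(x, y)                     # shape (6, 2)
- #   torch.combinations(torch.arange(3), r=2)       # pairs without replacement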
- - func: item(Tensor self) -> Scalar
- tags: data_dependent_output
- variants: method
- - func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
- variants: function
- - func: result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
- variants: function
- - func: result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
- variants: function
- - func: result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
- - func: can_cast(ScalarType from, ScalarType to) -> bool
- variants: function
- - func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType
- variants: function
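- # Example (not part of the schema): the type-promotion helpers above are
- # plain functions over dtypes/tensors:
- #
- #   import torch
- #   torch.result_type(torch.ones(1, dtype=torch.int32), 1.0)  # torch.float32
- #   torch.promote_types(torch.int32, torch.float16)           # torch.float16
- #   torch.can_cast(torch.float64, torch.int32)                # False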
- # NB: Does NOT check precondition that numel == 1
- - func: _local_scalar_dense(Tensor self) -> Scalar
- tags: data_dependent_output
- dispatch:
- CPU: _local_scalar_dense_cpu
- CUDA: _local_scalar_dense_cuda
- MPS: _local_scalar_dense_mps
- variants: function
- # MPS LSTM implementation
- - func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
- dispatch:
- MPS: _lstm_mps
- autogen: _lstm_mps.out
- - func: lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
- dispatch:
- MPS: lstm_mps_backward
- autogen: lstm_mps_backward.out
- # Fused RNN kernels
- - func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
- dispatch:
- CUDA: _thnn_fused_lstm_cell_cuda
- autogen: _thnn_fused_lstm_cell.out
- # NB: The composite version of this function below is a simple wrapper that duplicates some of the outputs.
- # It is necessary to avoid triggering TensorImpl use count checks in debug mode
- # NB: this function is NOT differentiable
- - func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
- dispatch:
- CUDA: _thnn_fused_lstm_cell_backward_impl_cuda
- autogen: _thnn_fused_lstm_cell_backward_impl.out
- - func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- - func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- - func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
- dispatch:
- CUDA: _thnn_fused_gru_cell_cuda
- autogen: _thnn_fused_gru_cell.out
- - func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- dispatch:
- CUDA: _thnn_fused_gru_cell_backward_cuda
- autogen: _thnn_fused_gru_cell_backward.out
- - func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- # RNN cells and layers
- - func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
- - func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
- - func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- - func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- - func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- - func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- - func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- - func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- - func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
- - func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
- - func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
- - func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
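- # Example (not part of the schema): the cell entries above power
- # `torch.nn.LSTMCell` and friends; a minimal sketch with illustrative sizes:
- #
- #   import torch
- #   cell = torch.nn.LSTMCell(input_size=4, hidden_size=8)
- #   x = torch.randn(2, 4)     # batch of 2
- #   h, c = cell(x)            # hidden state defaults to zeros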
- # Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp`
- # Quantized RNN layers
- # - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
- # - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
- # Quantized GRU layers
- # - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- #
- # - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- #
- # Quantized RNN cells
- - func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
- - func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- - func: quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- - func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- # PackedSequence utilities
- - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
- dispatch:
- CompositeExplicitAutograd: _pack_padded_sequence
- autogen: _pack_padded_sequence.out
- - func: _pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
- dispatch:
- CompositeImplicitAutograd: _pack_padded_sequence_backward_symint
- - func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
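- # Example (not part of the schema): these kernels back the public
- # `torch.nn.utils.rnn` helpers; a minimal sketch with padded input:
- #
- #   import torch
- #   from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
- #   x = torch.zeros(2, 3, 5)                       # batch of 2, max length 3
- #   packed = pack_padded_sequence(x, lengths=[3, 2], batch_first=True)
- #   y, lens = pad_packed_sequence(packed, batch_first=True)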
- # wrappers for legacy TH methods
- - func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
- variants: method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CPU, CUDA, Meta, MPS: set_
- autogen: set.source_Storage, set.source_Storage_out
- - func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
- variants: method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CPU: set_storage_cpu_
- Meta: set_storage_meta__symint
- CUDA: set_storage_cuda_
- MPS: set_storage_mps_
- QuantizedCPU, QuantizedCUDA: set_storage_quantized_
- autogen: set.source_Storage_storage_offset, set.source_Storage_storage_offset_out
- - func: set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
- variants: method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CompositeImplicitAutograd: set__symint
- - func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
- variants: method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CPU, CUDA, Meta, MPS: set_tensor_
- autogen: set.source_Tensor, set.source_Tensor_out
- - func: set_(Tensor(a!) self) -> Tensor(a!)
- variants: method
- dispatch:
- CPU: set_cpu_
- CUDA: set_cuda_
- Meta: set_meta_
- MPS: set_mps_
- autogen: set, set.out
- # Not making it CompositeImplicitAutograd because lift
- # should be a primitive w.r.t. functorch
- # TODO: this should have a view annotation
- # TODO: shouldn't be a method
- - func: lift(Tensor self) -> Tensor
- dispatch:
- CompositeExplicitAutograd: lift
- autogen: lift.out
- # lift_fresh is called with an argument that is guaranteed to be
- # fresh (i.e., newly allocated). This is ONLY called from a
- # torch.tensor call; if you FX trace a lift_fresh, you are obligated
- # to convert this into a lift_fresh_copy (because FX will violate the
- # freshness invariant when tracing).
- - func: lift_fresh(Tensor(a) self) -> Tensor(a)
- dispatch:
- CompositeExplicitAutograd: lift_fresh
- # Like lift, but it clones the input.
- - func: lift_fresh_copy(Tensor self) -> Tensor
- tags: view_copy
- dispatch:
- CompositeExplicitAutogradNonFunctional: lift_fresh_copy
- autogen: lift_fresh_copy.out
- - func: is_set_to(Tensor self, Tensor tensor) -> bool
- variants: method
- device_check: NoCheck
- device_guard: False
- dispatch:
- CPU, CUDA, MPS: is_set_to
- - func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CPU: masked_fill__cpu
- CUDA: masked_fill__cuda
- QuantizedCPU: masked_fill__quantized_cpu
- QuantizedCUDA: masked_fill__quantized_cuda
- MPS: masked_fill__mps
- autogen: masked_fill.Scalar_out
- - func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: masked_fill
- tags: pointwise
- - func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
- device_check: NoCheck # TensorIterator
- variants: method
- dispatch:
- CPU: masked_fill__cpu
- CUDA: masked_fill__cuda
- QuantizedCPU: masked_fill__quantized_cpu
- QuantizedCUDA: masked_fill__quantized_cuda
- MPS: masked_fill__mps
- autogen: masked_fill.Tensor_out
- - func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
- device_check: NoCheck # TensorIterator
- variants: function, method
- dispatch:
- CompositeExplicitAutograd: masked_fill

- func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
  variants: method
  dispatch:
    CPU: masked_scatter__cpu
    CUDA: masked_scatter__cuda
  autogen: masked_scatter.out

- func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: masked_scatter

- func: _masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
  dispatch:
    CUDA: masked_softmax_cuda
    CPU: masked_softmax_cpu
  autogen: _masked_softmax.out

- func: _masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
  dispatch:
    CUDA: masked_softmax_backward_cuda
    CPU: masked_softmax_backward_cpu
  autogen: _masked_softmax_backward.out

- func: view(Tensor(a) self, SymInt[] size) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    ZeroTensor, Meta, CPU, CUDA, QuantizedCPU, QuantizedCUDA, MPS: view
    MkldnnCPU: mkldnn_view
    NestedTensorCPU, NestedTensorCUDA: view_nested
  tags: core

# Warning: If you want to change the name or overload name of this
# operator, you might also want to change the `isBlockListedSchema`
# function in `torch/csrc/jit/frontend/schema_matching.cpp`.
# The name and overload name of this operator are hardcoded in that
# function in order to work around a bug:
# https://github.com/pytorch/pytorch/issues/47964
- func: view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: view_dtype

- func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
  variants: method
  dispatch:
    CPU, CUDA: put_
  autogen: put.out

- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: put

- func: index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  precomputed:
  - dim -> int dim
  dispatch:
    CPU: index_add_cpu_out
    CUDA: index_add_cuda_out
    MPS: index_add_mps_out

- func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
  structured_delegate: index_add.out
  variants: method

- func: index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
  structured_delegate: index_add.out
  variants: function, method

- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
  variants: function, method
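
# Illustrative usage (editorial comment, not schema): for dim=0,
# index_add_ accumulates self[index[i]] += alpha * source[i]:
#
#   import torch
#   x = torch.zeros(3, 2)
#   idx = torch.tensor([0, 2, 0])
#   src = torch.ones(3, 2)
#   x.index_add_(0, idx, src, alpha=2.0)
#   # row 0 receives two contributions:
#   # tensor([[4., 4.], [0., 0.], [2., 2.]])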

- func: index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  precomputed:
  - dim -> int dim
  dispatch:
    CPU: index_reduce_cpu_out
    CUDA: index_reduce_cuda_out

- func: index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
  structured_delegate: index_reduce.out
  variants: method

- func: index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
  structured_delegate: index_reduce.out
  variants: function, method
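
# Illustrative usage (editorial comment, not schema): index_reduce_
# generalizes index_add_; `reduce` selects "prod", "mean", "amax" or "amin",
# and include_self controls whether self's own values join the reduction:
#
#   import torch
#   x = torch.ones(3)
#   x.index_reduce_(0, torch.tensor([0, 0]), torch.tensor([2., 3.]),
#                   "prod", include_self=False)   # tensor([6., 1., 1.])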

- func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU: index_fill_
    CUDA: index_fill_
  autogen: index_fill.int_Scalar_out

- func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: index_fill

- func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: index_fill_
  autogen: index_fill.int_Tensor_out

- func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: index_fill

- func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
  structured_delegate: scatter.src_out
  variants: function, method

- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
  structured_delegate: scatter.src_out
  variants: method

- func: scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_src_out
    MPS: scatter_src_out_mps

- func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
  structured_delegate: scatter.value_out
  variants: function, method

- func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
  structured_delegate: scatter.value_out
  variants: method

- func: scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_value_out
    MPS: scatter_value_out_mps

- func: scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
  structured_delegate: scatter.reduce_out
  variants: function, method

- func: scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
  structured_delegate: scatter.reduce_out
  variants: method

- func: scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_reduce_out
    MPS: scatter_reduce_out_mps

- func: scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
  structured_delegate: scatter.value_reduce_out
  variants: function, method

- func: scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
  structured_delegate: scatter.value_reduce_out
  variants: method

- func: scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_value_reduce_out
    MPS: scatter_value_reduce_out_mps

- func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
  variants: function, method

- func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
  variants: function, method

- func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
  structured_delegate: scatter_add.out
  variants: function, method
  tags: core

- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
  structured_delegate: scatter_add.out
  variants: method

- func: scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_add
    MPS: scatter_add_mps_out

- func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
  variants: function, method
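
# Illustrative usage (editorial comment, not schema): for dim=0,
# scatter_add_ computes self[index[i][j]][j] += src[i][j]:
#
#   import torch
#   x = torch.zeros(3, 3)
#   idx = torch.tensor([[0, 1, 2], [0, 1, 2]])
#   x.scatter_add_(0, idx, torch.ones(2, 3))
#   # the diagonal receives both source rows:
#   # tensor([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]])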

- func: scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
  structured_delegate: scatter_reduce.two_out
  variants: function, method
  tags: core

- func: scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
  structured_delegate: scatter_reduce.two_out
  variants: method

- func: scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_reduce_two
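
# Illustrative usage (editorial comment, not schema): scatter_reduce takes
# reduce in {"sum", "prod", "mean", "amax", "amin"}; include_self=True folds
# the destination's existing values into the reduction:
#
#   import torch
#   x = torch.tensor([1., 10.])
#   x.scatter_reduce_(0, torch.tensor([0, 0]), torch.tensor([2., 3.]),
#                     reduce="amax")   # tensor([ 3., 10.])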

- func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: eq.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: eq.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: bitwise_and_out
  tags: pointwise

- func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_and_out
  tags: pointwise

- func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: bitwise_and
  tags: pointwise

- func: bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_and
  autogen: bitwise_and.Scalar_Tensor_out
  tags: pointwise

- func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: bitwise_and.Tensor_out
  tags: [core, pointwise]

- func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise

- func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_and.Tensor_out
  tags: pointwise

- func: __and__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function

- func: __and__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function

- func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
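
# Illustrative usage (editorial comment, not schema): the dunder entries back
# Python's operators, so for bool/int tensors `x & y`, `x | y` and `x ^ y`
# route to bitwise_and/or/xor:
#
#   import torch
#   a = torch.tensor([0b1100], dtype=torch.uint8)
#   b = torch.tensor([0b1010], dtype=torch.uint8)
#   assert (a & b).item() == 0b1000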

- func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: bitwise_or_out
  tags: pointwise

- func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_or_out
  tags: pointwise

- func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_or
  autogen: bitwise_or.Scalar_Tensor_out
  tags: pointwise

- func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: bitwise_or.Tensor_out
  tags: [core, pointwise]

- func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise

- func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_or.Tensor_out
  tags: pointwise

- func: __or__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function

- func: __or__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function

- func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: bitwise_xor_out
  tags: pointwise

- func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_xor_out
  tags: pointwise

- func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_xor
  autogen: bitwise_xor.Scalar_Tensor_out
  tags: pointwise

- func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: bitwise_xor.Tensor_out
  tags: [core, pointwise]

- func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise

- func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_xor.Tensor_out
  tags: pointwise

- func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise

- func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise

- func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __lshift__
  tags: pointwise

- func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __lshift__
  tags: pointwise

- func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __ilshift__
  autogen: __lshift__.Scalar_out
  tags: pointwise

- func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __ilshift__
  autogen: __lshift__.Tensor_out
  tags: pointwise

- func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: bitwise_left_shift.Tensor_out
  tags: pointwise

- func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_left_shift.Tensor_out
  tags: pointwise

- func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: bitwise_left_shift_out
  tags: pointwise

- func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift
  tags: pointwise

- func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift_
  tags: pointwise

- func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift_out
  tags: pointwise

- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift
  autogen: bitwise_left_shift.Scalar_Tensor_out
  tags: pointwise
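
# Illustrative usage (editorial comment, not schema): bitwise_left_shift is
# the named form of `<<` on integer tensors:
#
#   import torch
#   x = torch.tensor([1, 2, 3])
#   assert torch.equal(torch.bitwise_left_shift(x, 2), x << 2)  # [4, 8, 12]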

- func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __rshift__
  tags: pointwise

- func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __rshift__
  tags: pointwise

- func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __irshift__
  autogen: __rshift__.Scalar_out

- func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __irshift__
  autogen: __rshift__.Tensor_out

- func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: bitwise_right_shift.Tensor_out
  tags: pointwise

- func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_right_shift.Tensor_out
  tags: pointwise

- func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: bitwise_right_shift_out
  tags: pointwise

- func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift
  tags: pointwise

- func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift_
  tags: pointwise

- func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift_out
  tags: pointwise

- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift
  autogen: bitwise_right_shift.Scalar_Tensor_out
  tags: pointwise

- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
  structured_delegate: tril.out
  variants: method

- func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
  structured_delegate: triu.out
  variants: method

- func: digamma_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: digamma.out
  variants: method
  tags: pointwise

- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: lerp.Scalar_out
  tags: pointwise

- func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: lerp.Tensor_out
  tags: pointwise

- func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  dispatch:
    CPU, CUDA: addbmm_
    MPS: addbmm_mps_

- func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: addbmm_out
    MPS: addbmm_out_mps

- func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: addbmm
    MPS: addbmm_mps

- func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: random_
    Meta: random_meta_
    MPS: random_mps_
  autogen: random.from, random.from_out

- func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: random_
    Meta: random_meta_
    MPS: random_mps_
  autogen: random.to, random.to_out

- func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: random_
    Meta: random_meta_
  autogen: random, random.out

- func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: uniform_
    MPS: uniform_mps_
    Meta: uniform_meta_
  autogen: uniform, uniform.out
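
# Illustrative usage (editorial comment, not schema): these in-place RNG
# methods are tagged nondeterministic_seeded; passing an explicit
# torch.Generator makes them reproducible:
#
#   import torch
#   g = torch.Generator().manual_seed(0)
#   x = torch.empty(3).uniform_(0.0, 1.0, generator=g)                # floats in [0, 1)
#   y = torch.empty(3, dtype=torch.long).random_(0, 10, generator=g)  # ints in [0, 10)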

- func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: cauchy_
  autogen: cauchy, cauchy.out

- func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: log_normal_
  autogen: log_normal, log_normal.out

- func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: exponential_
    MPS: exponential_mps_
  autogen: exponential, exponential.out

- func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: geometric_

  # wrappers for TH functions
  autogen: geometric, geometric.out

- func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)

- func: diag(Tensor self, int diagonal=0) -> Tensor
  variants: method, function

- func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)

- func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor
  variants: method, function

- func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: triu_cpu
    CUDA: triu_cuda
    MPS: triu_mps_out

- func: triu(Tensor self, int diagonal=0) -> Tensor
  structured_delegate: triu.out
  variants: method, function

- func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: tril_cpu
    CUDA: tril_cuda
    MPS: tril_mps_out

- func: tril(Tensor self, int diagonal=0) -> Tensor
  structured_delegate: tril.out
  variants: method, function

- func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: tril_indices_cpu
    CUDA: tril_indices_cuda
  autogen: tril_indices.out

- func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: triu_indices_cpu
    CUDA: triu_indices_cuda
  autogen: triu_indices.out
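
# Illustrative usage (editorial comment, not schema): tril_indices returns a
# 2 x N tensor of (row, col) coordinates of the lower triangle:
#
#   import torch
#   torch.tril_indices(2, 2)   # tensor([[0, 1, 1], [0, 0, 1]])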

- func: trace(Tensor self) -> Tensor
  variants: method, function
  dispatch:
    CPU: trace_cpu
    CUDA: trace_cuda
    MPS: trace_mps_out
  autogen: trace.out

- func: trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: trace_backward_symint

- func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: ne_Scalar_out
    MPS: ne_scalar_out_mps
    QuantizedCPU: ne_out_quantized_cpu
  tags: pointwise

- func: ne.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: ne.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ne_quantized_cpu
  tags: [core, pointwise]

- func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: ne_Tensor_out
    MPS: ne_tensor_out_mps
    QuantizedCPU: ne_out_quantized_cpu
  tags: pointwise

- func: ne.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: ne.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ne_quantized_cpu
  tags: [core, pointwise]

- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: ne.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: ne.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method

# not_equal, alias for torch.ne
- func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: eq_Scalar_out
    MPS: eq_scalar_out_mps
    QuantizedCPU: eq_out_quantized_cpu
  tags: pointwise

- func: eq.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: eq.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: eq_quantized_cpu
  tags: [core, pointwise]

- func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: eq_Tensor_out
    MPS: eq_tensor_out_mps
    QuantizedCPU: eq_out_quantized_cpu
  tags: pointwise

- func: eq.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: eq.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: eq_quantized_cpu
  tags: [core, pointwise]

- func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: ge_Scalar_out
    MPS: ge_scalar_out_mps
    QuantizedCPU: ge_out_quantized_cpu
  tags: pointwise

- func: ge.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: ge.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ge_quantized_cpu
  tags: [core, pointwise]

- func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: ge_Tensor_out
    MPS: ge_tensor_out_mps
    QuantizedCPU: ge_out_quantized_cpu
  tags: pointwise

- func: ge.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: ge.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ge_quantized_cpu
  tags: [core, pointwise]

- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: ge.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: ge.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method

# greater_equal, alias for torch.ge
- func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: le_Scalar_out
    MPS: le_scalar_out_mps
    QuantizedCPU: le_out_quantized_cpu
  tags: pointwise

- func: le.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: le.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: le_quantized_cpu
  tags: [core, pointwise]

- func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: le_Tensor_out
    MPS: le_tensor_out_mps
    QuantizedCPU: le_out_quantized_cpu
  tags: pointwise

- func: le.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: le.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: le_quantized_cpu
  tags: [core, pointwise]

- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: le.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: le.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method

# less_equal, alias for torch.le
- func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: gt_Scalar_out
    MPS: gt_scalar_out_mps
    QuantizedCPU: gt_out_quantized_cpu
  tags: pointwise

- func: gt.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: gt.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: gt_quantized_cpu
  tags: [core, pointwise]

- func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: gt_Tensor_out
    MPS: gt_tensor_out_mps
    QuantizedCPU: gt_out_quantized_cpu
  tags: pointwise

- func: gt.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: gt.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: gt_quantized_cpu
  tags: [core, pointwise]

- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: gt.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: gt.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method

# greater, alias for torch.gt
- func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: lt_Scalar_out
    MPS: lt_scalar_out_mps
    QuantizedCPU: lt_out_quantized_cpu
  tags: pointwise

- func: lt.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: lt.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: lt_quantized_cpu
  tags: [core, pointwise]

- func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: lt_Tensor_out
    MPS: lt_tensor_out_mps
    QuantizedCPU: lt_out_quantized_cpu
  tags: pointwise

- func: lt.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: lt.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: lt_quantized_cpu
  tags: [core, pointwise]

- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: lt.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method

- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: lt.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method

# less, alias for torch.lt
- func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: less.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: less.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: take_out

- func: take(Tensor self, Tensor index) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: take

- func: take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)

- func: take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
  variants: method, function

- func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, QuantizedCPU: index_select_out_cpu_
    CUDA, QuantizedCUDA: index_select_out_cuda
    MPS: index_select_out_mps

- func: index_select(Tensor self, int dim, Tensor index) -> Tensor
  variants: method, function
  dispatch:
    CPU: index_select_cpu_
    QuantizedCPU: index_select_quantized_cpu_
    CUDA: index_select_cuda
    QuantizedCUDA: index_select_quantized_cuda
    SparseCPU: index_select_sparse_cpu
    SparseCUDA: index_select_sparse_cuda
    MPS: index_select_mps
  tags: core

- func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)

- func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
  variants: method, function

- func: index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: index_select_backward_symint
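
# Illustrative usage (editorial comment, not schema): index_select picks
# whole slices along `dim`, preserving the other dimensions:
#
#   import torch
#   x = torch.arange(6.).reshape(3, 2)
#   torch.index_select(x, 0, torch.tensor([2, 0]))   # rows 2 and 0, shape (2, 2)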

- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: masked_select_out_cpu
    CUDA: masked_select_out_cuda
    MPS: masked_select_out_mps
  tags: dynamic_output_shape

- func: masked_select(Tensor self, Tensor mask) -> Tensor
  variants: method, function
  dispatch:
    CPU: masked_select_cpu
    CUDA: masked_select_cuda
    MPS: masked_select_mps
  tags: dynamic_output_shape

- func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: nonzero_out_cpu
    CUDA: nonzero_out_cuda
    MPS: nonzero_out_mps
  tags: dynamic_output_shape

- func: nonzero(Tensor self) -> Tensor
  variants: method, function
  dispatch:
    CPU: nonzero_cpu
    CUDA: nonzero_cuda
    MPS: nonzero_mps
  tags: [dynamic_output_shape, core]

- func: nonzero_numpy(Tensor self) -> Tensor[]
  variants: method, function

- func: argwhere(Tensor self) -> Tensor
  variants: method, function
  tags: dynamic_output_shape

- func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU, CUDA: gather_out
    MPS: gather_out_mps

- func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
  variants: method, function
  structured_delegate: gather.out
  tags: core
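
# Illustrative usage (editorial comment, not schema): for dim=1, gather
# computes out[i][j] = self[i][index[i][j]]:
#
#   import torch
#   x = torch.tensor([[1., 2.], [3., 4.]])
#   torch.gather(x, 1, torch.tensor([[1, 0], [0, 0]]))
#   # tensor([[2., 1.], [3., 3.]])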

- func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)

- func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
  variants: method, function

- func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor

- func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: addcmul_out
    MPS: addcmul_out_mps
  tags: pointwise

- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
  structured_delegate: addcmul.out
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
  structured_delegate: addcmul.out
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise

- func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: addcdiv_out
    MPS: addcdiv_out_mps
  tags: pointwise

- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
  structured_delegate: addcdiv.out
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
  structured_delegate: addcdiv.out
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise
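
# Illustrative usage (editorial comment, not schema): these fused ops compute
# self + value * tensor1 * tensor2 (addcmul) and
# self + value * tensor1 / tensor2 (addcdiv) elementwise:
#
#   import torch
#   t = torch.ones(3)
#   out = torch.addcmul(t, torch.full((3,), 2.), torch.full((3,), 3.), value=0.5)
#   # 1 + 0.5 * 2 * 3 = tensor([4., 4., 4.])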

- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: cross_entropy_loss_symint

- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
  structured: True
  dispatch:
    CPU, CUDA: triangular_solve_out
    MPS: triangular_solve_mps_out
    SparseCsrCPU: triangular_solve_out_sparse_csr_cpu
    SparseCsrCUDA: triangular_solve_out_sparse_csr_cuda

- func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
  structured_delegate: triangular_solve.X
  variants: method, function

- func: _linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
  dispatch:
    CompositeExplicitAutograd: _linalg_check_errors

- func: linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  dispatch:
    CPU, CUDA: linalg_solve_triangular_out
    MPS: linalg_solve_triangular_mps_out

- func: linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_solve_triangular
    MPS: linalg_solve_triangular_mps

- func: linalg_vander(Tensor x, *, int? N=None) -> Tensor
  python_module: linalg

- func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)

- func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
  variants: method, function

# swapaxes, alias for transpose
- func: swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view

# swapdims, alias for transpose
- func: swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view

- func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: cholesky_out

- func: cholesky(Tensor self, bool upper=False) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: cholesky

- func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: cholesky_solve_out

- func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: cholesky_solve

- func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
  variants: function
  dispatch:
    CPU: _cholesky_solve_helper_cpu
    CUDA: _cholesky_solve_helper_cuda
  autogen: _cholesky_solve_helper.out

- func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: cholesky_inverse

- func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: cholesky_inverse_out

- func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)

- func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
  variants: method, function

- func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
  dispatch:
    CPU, CUDA: geqrf_out

- func: geqrf(Tensor self) -> (Tensor a, Tensor tau)
  variants: method, function
  dispatch:
    CPU, CUDA: geqrf

# orgqr, alias for linalg_householder_product
- func: orgqr(Tensor self, Tensor input2) -> Tensor
  variants: method, function

- func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)

- func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: ormqr_out

- func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: ormqr

- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
  variants: function

- func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)

- func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
  variants: method, function

# lu_unpack
- func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
  structured_delegate: lu_unpack.out
  variants: function

- func: lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: lu_unpack_out

# TODO: remove dispatch section when porting TH CUDA to ATen
- func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: multinomial_out
    MPS: multinomial_out_mps

- func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: multinomial
    MPS: multinomial_mps
  tags: nondeterministic_seeded

- func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lgamma_out
  tags: pointwise

- func: lgamma_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: lgamma.out
  variants: method
  tags: pointwise

- func: lgamma(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: lgamma.out
  variants: method, function
  tags: pointwise

- func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: digamma_out
  tags: pointwise

- func: digamma(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: digamma.out
  variants: method, function
  tags: pointwise

- func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: polygamma_out
  tags: pointwise

- func: polygamma(int n, Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: polygamma.out
  variants: method, function
  tags: pointwise

- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: polygamma_
  tags: pointwise
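
# Illustrative note (editorial comment, not schema): polygamma(n, x) is the
# n-th derivative of digamma, so order 0 coincides with digamma itself:
#
#   import torch
#   x = torch.tensor([1.5, 2.5])
#   assert torch.allclose(torch.polygamma(0, x), torch.digamma(x))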

- func: erfinv(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: erfinv.out
  variants: method, function
  dispatch:
    SparseCPU, SparseCUDA: erfinv_sparse
    SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr
  tags: pointwise

- func: erfinv_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: erfinv.out
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: erfinv_sparse_
    SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_
  tags: pointwise

- func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: erfinv_out
    SparseCPU, SparseCUDA: erfinv_sparse_out
    SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_out
  tags: pointwise

- func: i0(Tensor self) -> Tensor
  structured_delegate: i0.out
  variants: function, method
  tags: pointwise

- func: i0_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: i0.out
  variants: function, method
  tags: pointwise

- func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: i0_out
  tags: pointwise

- func: sign(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sign.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sign_sparse
    SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr
  tags: [core, pointwise]

- func: sign_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sign.out
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: sign_sparse_
    SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_
  tags: pointwise

- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sign_out
    MPS: sign_out_mps
    SparseCPU, SparseCUDA: sign_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_out
  tags: pointwise

- func: signbit(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: signbit.out
  dispatch:
    SparseCPU, SparseCUDA: signbit_sparse
    SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr
  tags: pointwise

- func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU: signbit_out
    CUDA: signbit_out
    MPS: signbit_out_mps
    SparseCPU, SparseCUDA: signbit_sparse_out
    SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr_out
  tags: pointwise

- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: dist
  autogen: dist.out

- func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: atan2_out
    MPS: atan2_mps_out
  tags: pointwise

- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: atan2.out
  variants: method
  tags: pointwise

- func: atan2(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: atan2.out
  variants: method, function
  tags: pointwise

# arctan2, alias of atan2
- func: arctan2(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lerp_Scalar
  tags: pointwise

- func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lerp_Tensor
  tags: pointwise

- func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: lerp.Scalar_out
  tags: pointwise

- func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: lerp.Tensor_out
  tags: pointwise
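
# Illustrative usage (editorial comment, not schema): lerp interpolates
# start + weight * (end - start), with weight given as a Scalar or a Tensor:
#
#   import torch
#   s, e = torch.zeros(3), torch.full((3,), 10.)
#   torch.lerp(s, e, 0.25)   # tensor([2.5000, 2.5000, 2.5000])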
- - func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: histogram_histc_cpu_out
- CUDA: _histc_out_cuda
- - func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
- variants: method, function
- dispatch:
- CPU: histogram_histc_cpu
- CUDA: _histc_cuda
- - func: histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
- dispatch:
- CPU: histogram_out_cpu
- - func: histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
- variants: method, function
- dispatch:
- CPU: histogram_cpu
- - func: histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
- dispatch:
- CPU: histogram_out_cpu
- - func: histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
- variants: method, function
- dispatch:
- CPU: histogram_cpu
- - func: _histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
- dispatch:
- CPU: histogramdd_bin_edges_cpu
- autogen: _histogramdd_bin_edges.out
- - func: _histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
- dispatch:
- CPU: histogramdd_cpu
- autogen: _histogramdd_from_bin_cts.out
- - func: _histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
- dispatch:
- CPU: histogramdd_cpu
- autogen: _histogramdd_from_bin_tensors.out
- - func: histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
- - func: histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
- - func: histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)

- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: fmod_out
  tags: pointwise

- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: fmod
  tags: pointwise

- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: fmod_
  tags: pointwise

- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: fmod_out
    MPS: fmod_mps_out
  tags: pointwise

- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: fmod.Tensor_out
  variants: method, function
  tags: [core, pointwise]

- func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: fmod.Tensor_out
  tags: pointwise

- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: hypot_out
  tags: pointwise

- func: hypot(Tensor self, Tensor other) -> Tensor
  structured_delegate: hypot.out
  variants: method, function
  tags: pointwise

- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: hypot.out
  variants: method
  tags: pointwise

- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: igamma_out
  tags: pointwise

- func: igamma(Tensor self, Tensor other) -> Tensor
  structured_delegate: igamma.out
  variants: method, function
  tags: pointwise

- func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: igamma.out
  variants: method
  tags: pointwise

- func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: igammac_out
  tags: pointwise

- func: igammac(Tensor self, Tensor other) -> Tensor
  structured_delegate: igammac.out
  variants: method, function
  tags: pointwise

- func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: igammac.out
  variants: method
  tags: pointwise
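# Illustrative sketch, not part of the schema: igamma is the regularized
# lower incomplete gamma function and igammac its upper complement, so for
# valid inputs igamma(a, x) + igammac(a, x) == 1.
#
#   import torch
#   a = torch.tensor([1.0, 2.0]); x = torch.tensor([0.5, 3.0])
#   total = torch.igamma(a, x) + torch.igammac(a, x)
#   torch.testing.assert_close(total, torch.ones_like(total))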

- func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: nextafter_out
  tags: pointwise

- func: nextafter(Tensor self, Tensor other) -> Tensor
  structured_delegate: nextafter.out
  variants: method, function
  tags: pointwise

- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: nextafter.out
  variants: method
  tags: pointwise

- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: remainder_out
  tags: pointwise

- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: remainder
  tags: pointwise

- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method
  dispatch:
    CompositeExplicitAutograd: remainder_
  tags: pointwise

- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: remainder_out
    MPS: remainder_out_mps
  tags: pointwise

- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: remainder.Tensor_out
  variants: method, function
  tags: [core, pointwise]

- func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: remainder.Tensor_out
  variants: method
  tags: pointwise

- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA, MPS: remainder
  autogen: remainder.Scalar_Tensor_out
  tags: pointwise
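# Usage sketch (illustrative only): fmod follows C's fmod and takes the sign
# of the dividend, while remainder follows Python's % and takes the sign of
# the divisor.
#
#   import torch
#   x = torch.tensor([-3.0, 3.0])
#   torch.fmod(x, 2.0)       # tensor([-1.,  1.])
#   torch.remainder(x, 2.0)  # tensor([1., 1.])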

- func: min(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: min
    MPS: min_mps
    QuantizedCPU: min_quantized_cpu

# Not to be confused with binary op `min.out`. Commented because of failed CI
# FIXME: enable this
#- func: min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
#  device_check: NoCheck # TensorIterator
#  dispatch:
#    CompositeExplicitAutograd: min_unary_out

- func: fmin(Tensor self, Tensor other) -> Tensor
  structured_delegate: fmin.out
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: fmin_out
  tags: pointwise

- func: max(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: max
    MPS: max_mps
    QuantizedCPU: max_quantized_cpu

- func: fmax(Tensor self, Tensor other) -> Tensor
  structured_delegate: fmax.out
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: fmax_out
  tags: pointwise

- func: maximum(Tensor self, Tensor other) -> Tensor
  structured_delegate: maximum.out
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: [core, pointwise]

- func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: maximum_out
    MPS: maximum_out_mps
  tags: pointwise

# binary max, alias of maximum
# NOTE: max is not an alias for maximum, since there is also unary max
- func: max.other(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise

- func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: pointwise

- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: max_unary_out
    QuantizedCPU: max_quantized_unary_out

- func: minimum(Tensor self, Tensor other) -> Tensor
  structured_delegate: minimum.out
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: [core, pointwise]

- func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: minimum_out
    MPS: minimum_out_mps
  tags: pointwise

# binary min, alias of minimum
# NOTE: min is not an alias for minimum, since there is also unary min
- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: pointwise

- func: min.other(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise
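# Sketch (illustrative, not part of the schema): maximum/minimum propagate
# NaN, while fmax/fmin prefer the non-NaN operand (C fmax/fmin semantics).
#
#   import torch
#   a = torch.tensor([1.0, float('nan')])
#   b = torch.tensor([2.0, 0.0])
#   torch.maximum(a, b)  # tensor([2., nan])
#   torch.fmax(a, b)     # tensor([2., 0.])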

- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)

- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  variants: method, function

- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
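# Usage sketch (illustrative): nanquantile behaves like quantile computed as
# if the NaN values were missing.
#
#   import torch
#   x = torch.tensor([1.0, 2.0, 3.0, float('nan')])
#   torch.quantile(x, 0.5)     # nan, since NaN propagates
#   torch.nanquantile(x, 0.5)  # tensor(2.), the median of [1, 2, 3]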

- func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: sort_out

- func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  structured: True
  dispatch:
    CPU, CUDA: sort_stable_out
    MPS: sort_stable_out_mps

- func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: sort

- func: sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
  structured_delegate: sort.values_stable
  variants: method, function
  dispatch:
    QuantizedCPU: sort_quantized_cpu_stable

- func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
  variants: method, function

- func: sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
  variants: method, function

- func: msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

- func: msort(Tensor self) -> Tensor
  variants: method, function

- func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function

- func: argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA, MPS: argsort_stable
  autogen: argsort.stable_out

- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
  variants: method, function

- func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  structured: True
  dispatch:
    CPU: topk_out_cpu
    CUDA: topk_out_cuda
    MPS: topk_out_mps

- func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
  variants: method, function
  structured_delegate: topk.values
  dispatch:
    QuantizedCPU: topk_quantized_cpu
  tags: core
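# Usage sketch (illustrative): a stable sort keeps the relative order of
# equal elements, and topk avoids a full sort when only the k largest
# entries are needed.
#
#   import torch
#   x = torch.tensor([3.0, 1.0, 3.0, 2.0])
#   values, indices = torch.sort(x, stable=True)  # equal 3.0s keep their order
#   top_vals, top_idx = torch.topk(x, k=2)        # top_vals == tensor([3., 3.])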

- func: all(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: all.all_out
  variants: method, function

- func: all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  structured: True
  dispatch:
    CPU, CUDA: all_all_out
    MPS: all_all_out_mps

- func: any(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: any.all_out
  variants: method, function
  dispatch:
    SparseCPU, SparseCUDA: any_sparse

- func: any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  structured: True
  dispatch:
    CPU, CUDA: any_all_out
    MPS: any_all_out_mps

- func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  dispatch:
    CPU, CUDA: renorm_out

- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: renorm.out

- func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: renorm.out
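# Usage sketch (illustrative): renorm rescales each slice along `dim` whose
# p-norm exceeds maxnorm so that its norm equals maxnorm; slices already
# within the bound are left unchanged.
#
#   import torch
#   x = torch.tensor([[3.0, 4.0], [0.3, 0.4]])
#   y = x.renorm(p=2, dim=0, maxnorm=1.0)
#   # row 0 had norm 5 and is scaled to norm 1; row 1 (norm 0.5) is unchanged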

- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, Meta, MPS: unfold
    QuantizedCPU, QuantizedCUDA: unfold
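# Usage sketch (illustrative): Tensor.unfold returns a view of sliding
# windows of length `size` taken every `step` elements along `dimension`.
#
#   import torch
#   x = torch.arange(6.)
#   x.unfold(0, 3, 2)  # tensor([[0., 1., 2.], [2., 3., 4.]]), shape (2, 3)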

- func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: unfold_backward
  autogen: unfold_backward.out

- func: equal(Tensor self, Tensor other) -> bool
  tags: [data_dependent_output, pointwise]
  variants: method, function
  dispatch:
    CPU: cpu_equal
    CUDA: cuda_equal
    MPS: mps_equal
    QuantizedCPU: equal_quantized_cpu
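# Usage sketch (illustrative): torch.equal returns a single Python bool
# (hence the data_dependent_output tag), unlike the elementwise torch.eq.
#
#   import torch
#   a = torch.tensor([1, 2]); b = torch.tensor([1, 2])
#   torch.equal(a, b)  # True
#   torch.eq(a, b)     # tensor([True, True])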

- func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: pow_Tensor_Tensor_out
    MPS: pow_tensor_tensor_out_mps
  tags: pointwise

- func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Tensor_out
  variants: method, function
  tags: [core, pointwise]

- func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  dispatch:
    CPU, CUDA: pow_Scalar_out
  tags: pointwise

- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Scalar_out
  tags: pointwise

- func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: pow_Tensor_Scalar_out
    SparseCPU, SparseCUDA: pow_out_sparse_scalar
    MPS: pow_tensor_scalar_out_mps
  tags: pointwise

- func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Scalar_out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: pow_sparse_scalar
  tags: [core, pointwise]

- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Scalar_out
  variants: method
  tags: pointwise

- func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Tensor_out
  variants: method
  tags: pointwise

- func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
  variants: function, method
  tags: pointwise

- func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
  tags: pointwise

- func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
  variants: function, method
  tags: pointwise

- func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
  variants: method
  tags: pointwise

- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
  variants: method
  tags: pointwise
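# Usage sketch (illustrative): float_power always computes in double (or
# complex double) precision, which avoids the integer overflow pow can hit.
#
#   import torch
#   x = torch.tensor([2], dtype=torch.int64)
#   torch.float_power(x, 3)  # tensor([8.], dtype=torch.float64)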

- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: normal_
    MPS: normal_mps_
    Meta: normal_meta_
    SparseCsrCPU, SparseCsrCUDA: normal_sparse_csr_
  autogen: normal.out

# Only used by the functionalization pass.
# Normally, the codegen would be able to generate a normal() NativeFunction,
# but we can't due to overload ambiguity with normal.Tensor_float.
- func: normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: normal_functional

- func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: normal_out
    MPS: normal_mps_out
    Meta: normal_out_meta

- func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
  dispatch:
    CPU, CUDA: normal
    MPS: normal_mps
    Meta: normal_meta
  tags: nondeterministic_seeded

- func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: normal_out
    Meta: normal_out_meta
    MPS: normal_mps_out
  tags: nondeterministic_seeded

- func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
  dispatch:
    CPU, CUDA: normal
    MPS: normal_mps
    Meta: normal_meta
  tags: nondeterministic_seeded

- func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: normal_out
    Meta: normal_out_meta
    MPS: normal_mps_out
  tags: nondeterministic_seeded

- func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
  dispatch:
    CPU, CUDA: normal
    MPS: normal_mps
    Meta: normal_meta
  tags: nondeterministic_seeded

- func: normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: normal
  tags: nondeterministic_seeded

- func: normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: normal_out
  tags: nondeterministic_seeded
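# Usage sketch (illustrative): the overloads above let mean/std each be a
# tensor or a float; when both are tensors they must hold the same number of
# elements.
#
#   import torch
#   g = torch.Generator().manual_seed(0)
#   torch.normal(0.0, 1.0, size=(2, 3), generator=g)  # float_float overload
#   torch.normal(torch.zeros(3), torch.ones(3))       # Tensor_Tensor overload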

- func: alias(Tensor(a) self) -> Tensor(a)
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: alias
  tags: core

- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
  variants: function
  dispatch:
    CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_
  autogen: _amp_foreach_non_finite_check_and_unscale, _amp_foreach_non_finite_check_and_unscale.out

- func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
  variants: function
  dispatch:
    CUDA: _amp_update_scale_cuda_
  autogen: _amp_update_scale, _amp_update_scale.out
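# Usage sketch (illustrative): these private _amp_* kernels back
# torch.cuda.amp.GradScaler, which is the supported entry point.
#
#   import torch
#   scaler = torch.cuda.amp.GradScaler()
#   # Typical training-loop pattern:
#   #   scaler.scale(loss).backward()
#   #   scaler.step(optimizer)  # unscales grads; skips the step on inf/nan
#   #   scaler.update()         # grows or backs off the scale factor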

#- func: _cat(Tensor[] tensors, int dim=0) -> Tensor
#  dispatch:
#    CPU: _cat_cpu
#    CUDA: cat_cuda
#    MPS: cat_mps
#    QuantizedCPU: cat_quantized_cpu

#- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
#  dispatch:
#    CPU: _cat_out_cpu
#    CUDA: cat_out_cuda
#    QuantizedCPU: cat_out_quantized_cpu

- func: _foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalar_kernel_slow
    CUDA: foreach_tensor_add_scalar_kernel_cuda

- func: _foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalar_kernel_slow_
    CUDA: foreach_tensor_add_scalar_kernel_cuda_
  autogen: _foreach_add.Scalar_out

- func: _foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalar_kernel_slow
    CUDA: foreach_tensor_sub_scalar_kernel_cuda

- func: _foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalar_kernel_slow_
    CUDA: foreach_tensor_sub_scalar_kernel_cuda_
  autogen: _foreach_sub.Scalar_out

- func: _foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalar_kernel_slow
    CUDA: foreach_tensor_mul_scalar_kernel_cuda

- func: _foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalar_kernel_slow_
    CUDA: foreach_tensor_mul_scalar_kernel_cuda_
  autogen: _foreach_mul.Scalar_out

- func: _foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalar_kernel_slow
    CUDA: foreach_tensor_div_scalar_kernel_cuda

- func: _foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalar_kernel_slow_
    CUDA: foreach_tensor_div_scalar_kernel_cuda_
  autogen: _foreach_div.Scalar_out

- func: _foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda

- func: _foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
  autogen: _foreach_clamp_min.Scalar_out

- func: _foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda

- func: _foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
  autogen: _foreach_clamp_max.Scalar_out

# foreach_minimum/maximum dispatch to clamp_max/min
- func: _foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda

- func: _foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
  autogen: _foreach_maximum.Scalar_out

- func: _foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda

- func: _foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
  autogen: _foreach_minimum.Scalar_out
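# Usage sketch (illustrative): the _foreach_* ops apply one operation across
# a whole list of tensors in a few fused kernel launches; they are private
# but callable.
#
#   import torch
#   params = [torch.ones(2), torch.ones(3)]
#   torch._foreach_mul_(params, 0.5)        # in-place: every tensor halved
#   sums = torch._foreach_add(params, 1.0)  # functional: returns a new list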

- func: _foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_list_kernel_slow
    CUDA: foreach_tensor_add_list_kernel_cuda

- func: _foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_list_kernel_slow_
    CUDA: foreach_tensor_add_list_kernel_cuda_
  autogen: _foreach_add.List_out

- func: _foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_list_kernel_slow
    CUDA: foreach_tensor_sub_list_kernel_cuda

- func: _foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_list_kernel_slow_
    CUDA: foreach_tensor_sub_list_kernel_cuda_
  autogen: _foreach_sub.List_out

- func: _foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_list_kernel_slow
    CUDA: foreach_tensor_mul_list_kernel_cuda

- func: _foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_list_kernel_slow_
    CUDA: foreach_tensor_mul_list_kernel_cuda_
  autogen: _foreach_mul.List_out

- func: _foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_list_kernel_slow
    CUDA: foreach_tensor_div_list_kernel_cuda

- func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_list_kernel_slow_
    CUDA: foreach_tensor_div_list_kernel_cuda_
  autogen: _foreach_div.List_out

- func: _foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda

- func: _foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow_
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
  autogen: _foreach_clamp_min.List_out

- func: _foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda

- func: _foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow_
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
  autogen: _foreach_clamp_max.List_out

# foreach_minimum/maximum dispatch to clamp_max/min
- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda

- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow_
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
  autogen: _foreach_maximum.List_out

- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda

- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow_
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
  autogen: _foreach_minimum.List_out

- func: _foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalarlist_kernel_slow
    CUDA: foreach_tensor_add_scalarlist_kernel_cuda

- func: _foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalarlist_kernel_slow_
    CUDA: foreach_tensor_add_scalarlist_kernel_cuda_
  autogen: _foreach_add.ScalarList_out

- func: _foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalarlist_kernel_slow
    CUDA: foreach_tensor_sub_scalarlist_kernel_cuda

- func: _foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalarlist_kernel_slow_
    CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_
  autogen: _foreach_sub.ScalarList_out

- func: _foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalarlist_kernel_slow
    CUDA: foreach_tensor_div_scalarlist_kernel_cuda

- func: _foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalarlist_kernel_slow_
    CUDA: foreach_tensor_div_scalarlist_kernel_cuda_
  autogen: _foreach_div.ScalarList_out

- func: _foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalarlist_kernel_slow
    CUDA: foreach_tensor_mul_scalarlist_kernel_cuda

- func: _foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalarlist_kernel_slow_
    CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_
  autogen: _foreach_mul.ScalarList_out

- func: _foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda

- func: _foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
  autogen: _foreach_clamp_min.ScalarList_out

- func: _foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda

- func: _foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
  autogen: _foreach_clamp_max.ScalarList_out

# foreach_minimum/maximum dispatch to clamp_max/min
- func: _foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda

- func: _foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
  autogen: _foreach_maximum.ScalarList_out

- func: _foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda

- func: _foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
  autogen: _foreach_minimum.ScalarList_out
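# Usage sketch (illustrative): the ScalarList overloads take one scalar per
# tensor, which is how optimizers can apply per-parameter step sizes in a
# single call.
#
#   import torch
#   params = [torch.ones(2), torch.ones(3)]
#   torch._foreach_mul_(params, [0.1, 0.9])  # each tensor scaled by its own factor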

- func: _foreach_exp(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_exp_slow
    CUDA: foreach_tensor_exp_cuda

- func: _foreach_zero_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_zero_slow_
    CUDA: foreach_tensor_zero_cuda_
  autogen: _foreach_zero, _foreach_zero.out

- func: _foreach_exp_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_exp_slow_
    CUDA: foreach_tensor_exp_cuda_
  autogen: _foreach_exp.out

- func: _foreach_sqrt(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sqrt_slow
    CUDA: foreach_tensor_sqrt_cuda

- func: _foreach_sqrt_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sqrt_slow_
    CUDA: foreach_tensor_sqrt_cuda_
  autogen: _foreach_sqrt.out

- func: _foreach_abs(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_abs_slow
    CUDA: foreach_tensor_abs_cuda

- func: _foreach_abs_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_abs_slow_
    CUDA: foreach_tensor_abs_cuda_
  autogen: _foreach_abs.out

- func: _foreach_acos(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_acos_slow
    CUDA: foreach_tensor_acos_cuda

- func: _foreach_acos_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_acos_slow_
    CUDA: foreach_tensor_acos_cuda_
  autogen: _foreach_acos.out

- func: _foreach_asin(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_asin_slow
    CUDA: foreach_tensor_asin_cuda

- func: _foreach_asin_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_asin_slow_
    CUDA: foreach_tensor_asin_cuda_
  autogen: _foreach_asin.out

- func: _foreach_atan(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_atan_slow
    CUDA: foreach_tensor_atan_cuda

- func: _foreach_atan_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_atan_slow_
    CUDA: foreach_tensor_atan_cuda_
  autogen: _foreach_atan.out

- func: _foreach_ceil(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ceil_slow
    CUDA: foreach_tensor_ceil_cuda

- func: _foreach_ceil_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ceil_slow_
    CUDA: foreach_tensor_ceil_cuda_
  autogen: _foreach_ceil.out

- func: _foreach_cos(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cos_slow
    CUDA: foreach_tensor_cos_cuda

- func: _foreach_cos_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cos_slow_
    CUDA: foreach_tensor_cos_cuda_
  autogen: _foreach_cos.out

- func: _foreach_cosh(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cosh_slow
    CUDA: foreach_tensor_cosh_cuda

- func: _foreach_cosh_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cosh_slow_
    CUDA: foreach_tensor_cosh_cuda_
  autogen: _foreach_cosh.out

- func: _foreach_erf(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erf_slow
    CUDA: foreach_tensor_erf_cuda

- func: _foreach_erf_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erf_slow_
    CUDA: foreach_tensor_erf_cuda_
  autogen: _foreach_erf.out

- func: _foreach_erfc(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erfc_slow
    CUDA: foreach_tensor_erfc_cuda

- func: _foreach_erfc_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erfc_slow_
    CUDA: foreach_tensor_erfc_cuda_
  autogen: _foreach_erfc.out

- func: _foreach_expm1(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_expm1_slow
    CUDA: foreach_tensor_expm1_cuda

- func: _foreach_expm1_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_expm1_slow_
    CUDA: foreach_tensor_expm1_cuda_
  autogen: _foreach_expm1.out

- func: _foreach_floor(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_floor_slow
    CUDA: foreach_tensor_floor_cuda

- func: _foreach_floor_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_floor_slow_
    CUDA: foreach_tensor_floor_cuda_
  autogen: _foreach_floor.out

- func: _foreach_log(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log_slow
    CUDA: foreach_tensor_log_cuda

- func: _foreach_log_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log_slow_
    CUDA: foreach_tensor_log_cuda_
  autogen: _foreach_log.out

- func: _foreach_log10(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log10_slow
    CUDA: foreach_tensor_log10_cuda

- func: _foreach_log10_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log10_slow_
    CUDA: foreach_tensor_log10_cuda_
  autogen: _foreach_log10.out

- func: _foreach_log1p(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log1p_slow
    CUDA: foreach_tensor_log1p_cuda

- func: _foreach_log1p_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log1p_slow_
    CUDA: foreach_tensor_log1p_cuda_
  autogen: _foreach_log1p.out

- func: _foreach_log2(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log2_slow
    CUDA: foreach_tensor_log2_cuda

- func: _foreach_log2_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log2_slow_
    CUDA: foreach_tensor_log2_cuda_
  autogen: _foreach_log2.out

- func: _foreach_neg(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_neg_slow
    CUDA: foreach_tensor_neg_cuda

- func: _foreach_neg_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_neg_slow_
    CUDA: foreach_tensor_neg_cuda_
  autogen: _foreach_neg.out

- func: _foreach_tan(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tan_slow
    CUDA: foreach_tensor_tan_cuda

- func: _foreach_tan_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tan_slow_
    CUDA: foreach_tensor_tan_cuda_
  autogen: _foreach_tan.out

- func: _foreach_tanh(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tanh_slow
    CUDA: foreach_tensor_tanh_cuda

- func: _foreach_tanh_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tanh_slow_
    CUDA: foreach_tensor_tanh_cuda_
  autogen: _foreach_tanh.out

- func: _foreach_sin(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sin_slow
    CUDA: foreach_tensor_sin_cuda

- func: _foreach_sin_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sin_slow_
    CUDA: foreach_tensor_sin_cuda_
  autogen: _foreach_sin.out

- func: _foreach_sinh(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sinh_slow
    CUDA: foreach_tensor_sinh_cuda

- func: _foreach_sinh_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sinh_slow_
    CUDA: foreach_tensor_sinh_cuda_
  autogen: _foreach_sinh.out

- func: _foreach_round(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_round_slow
    CUDA: foreach_tensor_round_cuda

- func: _foreach_round_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_round_slow_
    CUDA: foreach_tensor_round_cuda_
  autogen: _foreach_round.out

- func: _foreach_lgamma(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lgamma_slow
    CUDA: foreach_tensor_lgamma_cuda

- func: _foreach_lgamma_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lgamma_slow_
    CUDA: foreach_tensor_lgamma_cuda_
  autogen: _foreach_lgamma.out

- func: _foreach_frac(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_frac_slow
    CUDA: foreach_tensor_frac_cuda

- func: _foreach_frac_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_frac_slow_
    CUDA: foreach_tensor_frac_cuda_
  autogen: _foreach_frac.out

- func: _foreach_reciprocal(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_reciprocal_slow
    CUDA: foreach_tensor_reciprocal_cuda

- func: _foreach_reciprocal_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_reciprocal_slow_
    CUDA: foreach_tensor_reciprocal_cuda_
  autogen: _foreach_reciprocal.out

- func: _foreach_sigmoid(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sigmoid_slow
    CUDA: foreach_tensor_sigmoid_cuda

- func: _foreach_sigmoid_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sigmoid_slow_
    CUDA: foreach_tensor_sigmoid_cuda_
  autogen: _foreach_sigmoid.out

- func: _foreach_trunc(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_trunc_slow
    CUDA: foreach_tensor_trunc_cuda

- func: _foreach_trunc_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_trunc_slow_
    CUDA: foreach_tensor_trunc_cuda_
  autogen: _foreach_trunc.out
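# Usage sketch (illustrative): the unary foreach kernels follow the same
# slow/cuda split; e.g. torch._foreach_sqrt applies sqrt over a tensor list.
#
#   import torch
#   xs = [torch.tensor([4.0]), torch.tensor([9.0])]
#   torch._foreach_sqrt(xs)   # [tensor([2.]), tensor([3.])]
#   torch._foreach_zero_(xs)  # zeroes every tensor in place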

- func: _foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalar_slow_
    CUDA: foreach_tensor_addcdiv_scalar_cuda_
  autogen: _foreach_addcdiv.Scalar_out

- func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalar_slow_
    CUDA: foreach_tensor_addcmul_scalar_cuda_
  autogen: _foreach_addcmul.Scalar_out

- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalarlist_slow_
    CUDA: foreach_tensor_addcdiv_scalarlist_cuda_
  autogen: _foreach_addcdiv.ScalarList_out

- func: _foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_tensor_slow_
    CUDA: foreach_tensor_addcdiv_tensor_cuda_
  autogen: _foreach_addcdiv.Tensor_out

- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalarlist_slow_
    CUDA: foreach_tensor_addcmul_scalarlist_cuda_
  autogen: _foreach_addcmul.ScalarList_out

- func: _foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_tensor_slow_
    CUDA: foreach_tensor_addcmul_tensor_cuda_
  autogen: _foreach_addcmul.Tensor_out

- func: _foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalar_slow
    CUDA: foreach_tensor_addcdiv_scalar_cuda

- func: _foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalar_slow
    CUDA: foreach_tensor_addcmul_scalar_cuda

- func: _foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalarlist_slow
    CUDA: foreach_tensor_addcdiv_scalarlist_cuda

- func: _foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_tensor_slow
    CUDA: foreach_tensor_addcdiv_tensor_cuda

- func: _foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalarlist_slow
    CUDA: foreach_tensor_addcmul_scalarlist_cuda

- func: _foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_tensor_slow
    CUDA: foreach_tensor_addcmul_tensor_cuda
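# Usage sketch (illustrative): _foreach_addcmul_ computes
# self[i] += value * tensor1[i] * tensor2[i] for every i in the lists, the
# multi-tensor form of Tensor.addcmul_ used by foreach optimizer paths.
#
#   import torch
#   p  = [torch.zeros(2)]
#   t1 = [torch.tensor([1.0, 2.0])]
#   t2 = [torch.tensor([3.0, 4.0])]
#   torch._foreach_addcmul_(p, t1, t2, 0.5)  # p[0] == tensor([1.5, 4.0])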

- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_norm_slow
    CUDA: foreach_tensor_norm_cuda
  autogen: _foreach_norm.Scalar_out
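# Usage sketch (illustrative): _foreach_norm returns one norm per tensor in
# a single pass; in recent PyTorch versions the foreach path of
# torch.nn.utils.clip_grad_norm_ is built on it.
#
#   import torch
#   grads = [torch.tensor([3.0, 4.0]), torch.tensor([1.0])]
#   torch._foreach_norm(grads, 2)  # [tensor(5.), tensor(1.)]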

- func: _foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ternary_lerp_slow
    CUDA: foreach_tensor_lerp_ternary_cuda
  autogen: _foreach_lerp.List_out

- func: _foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ternary_lerp_slow_
    CUDA: foreach_tensor_lerp_ternary_cuda_
  autogen: _foreach_lerp.List_out

- func: _foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lerp_list_kernel_slow
    CUDA: foreach_tensor_lerp_list_cuda
  autogen: _foreach_lerp.Scalar_out

- func: _foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lerp_list_kernel_slow_
    CUDA: foreach_tensor_lerp_list_cuda_
  autogen: _foreach_lerp.Scalar_out
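# Sketch (an assumption for illustration): lerp(a, b, w) = a + w * (b - a),
# so the Scalar overload is a natural fit for exponential moving averages of
# model parameters.
#
#   import torch
#   decay = 0.999
#   ema   = [torch.zeros(2)]
#   live  = [torch.ones(2)]
#   torch._foreach_lerp_(ema, live, 1.0 - decay)  # ema += (1 - decay) * (live - ema)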

- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
  dispatch:
    CPU: bucketize_cpu
    CUDA: bucketize_cuda

- func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: bucketize_out_cpu
    CUDA: bucketize_out_cuda

- func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
  dispatch:
    CPU: bucketize_cpu
    CUDA: bucketize_cuda
  autogen: bucketize.Scalar_out

- func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
  dispatch:
    CPU: searchsorted_cpu
    CUDA: searchsorted_cuda

- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: searchsorted_out_cpu
    CUDA: searchsorted_out_cuda

- func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
  dispatch:
    CPU: searchsorted_cpu
    CUDA: searchsorted_cuda
  autogen: searchsorted.Scalar_out
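
# Reference semantics for the entries above, assuming a 1-D sorted
# `boundaries`: bucketize returns, per element v, the first index i with
# v <= boundaries[i] (right=False) or v < boundaries[i] (right=True); it is
# searchsorted with the roles of the two arguments fixed. For example:
#
#   torch.bucketize(torch.tensor([1, 5, 9]), torch.tensor([2, 4, 6, 8]))
#   # tensor([0, 2, 4])
#
# out_int32=True yields int32 indices instead of the default int64.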

- func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
  structured_delegate: _convert_indices_from_coo_to_csr.out

- func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: _convert_indices_from_coo_to_csr_structured_cpu
    CUDA: _convert_indices_from_coo_to_csr_structured_cuda

- func: _convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
  structured_delegate: _convert_indices_from_csr_to_coo.out

- func: _convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: _convert_indices_from_csr_to_coo_structured_cpu
    CUDA: _convert_indices_from_csr_to_coo_structured_cuda
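
# A sketch of what the COO -> CSR conversion above computes: given the sorted
# row indices of a sparse tensor with `size` rows, the compressed row pointer
# crow_indices[i] holds the number of nonzeros in rows < i, which could be
# written (illustrative only, not the structured kernel) as:
#
#   counts = torch.bincount(row_indices, minlength=size)
#   crow = torch.zeros(size + 1, dtype=torch.int64)
#   crow[1:] = counts.cumsum(0)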

## NN wrappers

- func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: mse_loss_out
    MPS: mse_loss_out_mps

- func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: mse_loss.out
  python_module: nn

- func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: mse_loss_backward_out
    MPS: mse_loss_backward_out_mps

- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: mse_loss_backward
    MPS: mse_loss_backward_mps
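
# For reference, mse_loss above is the elementwise squared error
# (self - target)**2 followed by the reduction (Mean averages, Sum adds,
# None returns the per-element losses); with reduction=Mean it matches
#
#   (input - target).pow(2).mean()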

- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  python_module: nn

- func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu_out
    CUDA: multi_margin_loss_cuda_out

- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu
    CUDA: multi_margin_loss_cuda

- func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu_backward_out
    CUDA: multi_margin_loss_cuda_backward_out

- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu_backward
    CUDA: multi_margin_loss_cuda_backward

- func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  python_module: nn

- func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_forward_out_cpu
    CUDA: multilabel_margin_loss_forward_out_cuda

- func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_forward_cpu
    CUDA: multilabel_margin_loss_forward_cuda

- func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_backward_cpu_out
    CUDA: multilabel_margin_loss_backward_cuda_out

- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_backward_cpu
    CUDA: multilabel_margin_loss_backward_cuda

- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: nll_loss_nd_symint

- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: nll_loss_symint

- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: nll_loss_forward_out_cpu
    CUDA: nll_loss_forward_out_cuda
    MPS: nll_loss_forward_out_mps

- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
  python_module: nn
  structured_delegate: nll_loss_forward.output

- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: nll_loss_backward_out_cpu
    CUDA: nll_loss_backward_out_cuda
    MPS: nll_loss_backward_out_mps

- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
  python_module: nn
  structured_delegate: nll_loss_backward.grad_input

- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: nll_loss2d_symint

- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CPU: nll_loss2d_forward_out_cpu
    CUDA: nll_loss2d_forward_out_cuda
    MPS: nll_loss2d_forward_out_mps

- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
  python_module: nn
  dispatch:
    CPU: nll_loss2d_forward_cpu
    CUDA: nll_loss2d_forward_cuda
    MPS: nll_loss2d_forward_mps

- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: nll_loss2d_backward_out_cpu
    CUDA: nll_loss2d_backward_out_cuda
    MPS: nll_loss2d_backward_out_mps

- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
  python_module: nn
  dispatch:
    CPU: nll_loss2d_backward_cpu
    CUDA: nll_loss2d_backward_cuda
    MPS: nll_loss2d_backward_mps

- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: smooth_l1_loss_out
    MPS: smooth_l1_loss_out_mps

- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: smooth_l1_loss.out
  python_module: nn

- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: smooth_l1_loss_backward_out
    CUDA: smooth_l1_loss_backward_out
    MPS: smooth_l1_loss_backward_out_mps

- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: smooth_l1_loss_backward
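
# Smooth L1 above is piecewise in d = self - target:
#
#   0.5 * d**2 / beta    if |d| < beta
#   |d| - 0.5 * beta     otherwise
#
# followed by the reduction; with beta=0 it degenerates to plain L1.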

- func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: huber_loss_out
    MPS: huber_loss_out_mps

- func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: huber_loss
    MPS: huber_loss_mps

- func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: huber_loss_backward_out
    MPS: huber_loss_backward_out_mps

- func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: huber_loss_backward
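
# Huber above uses the same quadratic-to-linear hinge, but scaled so that the
# quadratic branch does not depend on the threshold, with d = self - target:
#
#   0.5 * d**2                      if |d| < delta
#   delta * (|d| - 0.5 * delta)     otherwise
#
# so, elementwise, huber_loss(delta) == delta * smooth_l1_loss(beta=delta).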

- func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss_out

- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss

- func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss_backward_out

- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss_backward

- func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: elu_out
    MPS: elu_out_mps

- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
  structured_delegate: elu.out
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: elu_backward_out
    MPS: elu_backward_out_mps

- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
  structured_delegate: elu_backward.grad_input
  python_module: nn

- func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
  structured_delegate: elu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
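
# The elu entries above evaluate, per element (reference math, not the
# TensorIterator kernel itself):
#
#   scale * x                                     if x > 0
#   scale * alpha * (exp(x * input_scale) - 1)    otherwise
#
# elu_backward accepts either the input or the output (is_result /
# self_or_result) since the gradient can be reconstructed from both.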

- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: glu_out
    MPS: glu_out_mps

- func: glu(Tensor self, int dim=-1) -> Tensor
  structured_delegate: glu.out
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: glu_backward_cpu_out
    CUDA: glu_backward_cuda_out
    MPS: glu_backward_mps_out

- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
  python_module: nn
  dispatch:
    CPU: glu_backward_cpu
    CUDA: glu_backward_cuda
    MPS: glu_backward_mps

- func: glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: glu_jvp
  autogen: glu_jvp.out

- func: glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: glu_backward_jvp
  autogen: glu_backward_jvp.out
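
# glu above is the gated linear unit: the input is split in two along `dim`
# and one half gates the other, equivalent to
#
#   a, b = x.chunk(2, dim)
#   out = a * torch.sigmoid(b)
#
# glu_jvp / glu_backward_jvp supply the forward-mode (JVP) derivatives.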

- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardsigmoid_out
    QuantizedCPU: hardsigmoid_out_quantized_cpu

- func: hardsigmoid(Tensor self) -> Tensor
  structured_delegate: hardsigmoid.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    QuantizedCPU: hardsigmoid_quantized_cpu

- func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: hardsigmoid.out
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: hardsigmoid_backward_out

- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
  structured_delegate: hardsigmoid_backward.grad_input
  python_module: nn
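
# hardsigmoid above is the piecewise-linear sigmoid approximation
#
#   out = torch.clamp(x / 6 + 0.5, 0, 1)    # == relu6(x + 3) / 6
#
# so hardsigmoid_backward scales grad_output by 1/6 inside (-3, 3) and
# zeroes it outside.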

- func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA, MPS: hardtanh_out
    QuantizedCPU: hardtanh_out_quantized_cpu

- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA, MPS: hardtanh
    QuantizedCPU: hardtanh_quantized_cpu
  tags: core

- func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: hardtanh_backward_out
    MPS: hardtanh_backward_out_mps

- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: hardtanh_backward
    MPS: hardtanh_backward_mps

- func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA, MPS: hardtanh_
    QuantizedCPU: hardtanh_quantized_cpu_

- func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish_out
    MPS: hardswish_out_mps

- func: hardswish(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish
    MPS: hardswish_mps

- func: hardswish_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish_
    MPS: hardswish_mps_

- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish_backward
    MPS: hardswish_backward_mps
  autogen: hardswish_backward.out

- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: leaky_relu_out
    MPS: leaky_relu_out_mps
    QuantizedCPU: leaky_relu_out_quantized_cpu

- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
  structured_delegate: leaky_relu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    QuantizedCPU: leaky_relu_quantized_cpu
  tags: core

- func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: leaky_relu_backward_out
    MPS: leaky_relu_backward_out_mps

- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
  structured_delegate: leaky_relu_backward.grad_input
  python_module: nn

- func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
  structured_delegate: leaky_relu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    QuantizedCPU: leaky_relu_quantized_cpu_

- func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: log_sigmoid(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU: log_sigmoid_forward_out_cpu
    CUDA: log_sigmoid_forward_out_cuda

- func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU: log_sigmoid_forward_cpu
    CUDA: log_sigmoid_forward_cuda

- func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: log_sigmoid_backward_cpu_out
    CUDA: log_sigmoid_backward_cuda_out

- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
  python_module: nn
  dispatch:
    CPU: log_sigmoid_backward_cpu
    CUDA: log_sigmoid_backward_cuda

- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  tags: nondeterministic_seeded
  dispatch:
    CPU: rrelu_with_noise_out_cpu
    CUDA: rrelu_with_noise_out_cuda

- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
  python_module: nn
  dispatch:
    CPU: rrelu_with_noise_cpu
    CUDA: rrelu_with_noise_cuda
  tags: nondeterministic_seeded

- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: rrelu_with_noise_backward
  autogen: rrelu_with_noise_backward.out

- func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
  python_module: nn
  tags: nondeterministic_seeded
  dispatch:
    CPU: rrelu_with_noise_cpu_
    CUDA: rrelu_with_noise_cuda_

- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: softplus_out
    MPS: softplus_out_mps

- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
  structured_delegate: softplus.out
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: softplus_backward_out
    MPS: softplus_backward_out_mps

- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
  structured_delegate: softplus_backward.grad_input
  python_module: nn
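
# softplus above computes (1 / beta) * log(1 + exp(beta * x)); past the cutoff
# the kernels return the input unchanged for numerical stability:
#
#   out = x if beta * x > threshold else (1 / beta) * torch.log1p(torch.exp(beta * x))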

- func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: softshrink_out

- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
  structured_delegate: softshrink.out
  device_check: NoCheck # TensorIterator
  python_module: nn

- func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: softshrink_backward_out

- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
  structured_delegate: softshrink_backward.grad_input
  python_module: nn
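
# softshrink above shrinks each element toward zero by lambd:
#
#   x - lambd    if x > lambd
#   x + lambd    if x < -lambd
#   0            otherwise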

- func: adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool2d_out_cpu
    CUDA: adaptive_avg_pool2d_out_cuda
    MPS: adaptive_avg_pool2d_out_mps
    MkldnnCPU: mkldnn_adaptive_avg_pool2d_out_stub

- func: adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: adaptive_avg_pool2d_symint

- func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_adaptive_avg_pool2d

- func: mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    MkldnnCPU: mkldnn_adaptive_avg_pool2d_out

- func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward
  autogen: mkldnn_adaptive_avg_pool2d_backward.out

- func: _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
  dispatch:
    CPU: adaptive_avg_pool2d_cpu
    CUDA: adaptive_avg_pool2d_cuda
    MPS: adaptive_avg_pool2d_mps
    QuantizedCPU: adaptive_avg_pool2d_quantized_cpu
    QuantizedCUDA: adaptive_avg_pool2d_quantized_cuda
  autogen: _adaptive_avg_pool2d.out
  tags: core

- func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool2d_backward_cpu
    CUDA: adaptive_avg_pool2d_backward_cuda
    MPS: adaptive_avg_pool2d_backward_mps
  autogen: _adaptive_avg_pool2d_backward.out
  tags: core
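
# The adaptive pools above choose, for output index i over input extent `in`
# and output extent `out`, the window (the standard formula, shown here only
# as a sketch):
#
#   start = (i * in) // out
#   end = -((-(i + 1) * in) // out)    # == ceil((i + 1) * in / out)
#
# so the windows tile the whole input and may overlap when `out` does not
# divide `in`.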

- func: adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool3d_out_cpu
    CUDA: adaptive_avg_pool3d_out_cuda
    QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu

- func: adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: adaptive_avg_pool3d_symint

- func: _adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
  dispatch:
    CPU: adaptive_avg_pool3d_cpu
    CUDA: adaptive_avg_pool3d_cuda
    QuantizedCPU: adaptive_avg_pool3d_quantized_cpu
  autogen: _adaptive_avg_pool3d.out

- func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool3d_backward_out_cpu
    CUDA: adaptive_avg_pool3d_backward_out_cuda

- func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool3d_backward_cpu
    CUDA: adaptive_avg_pool3d_backward_cuda
  autogen: _adaptive_avg_pool3d_backward.out

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool2d_out_cpu
    CUDA: adaptive_max_pool2d_out_cuda
    MPS: adaptive_max_pool2d_out_mps

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: adaptive_max_pool2d.out

- func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool2d_backward_out_cpu
    CUDA: adaptive_max_pool2d_backward_out_cuda
    MPS: adaptive_max_pool2d_backward_out_mps

- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: adaptive_max_pool2d_backward.grad_input

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool3d_out_cpu
    CUDA: adaptive_max_pool3d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: adaptive_max_pool3d.out

- func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool3d_backward_out_cpu
    CUDA: adaptive_max_pool3d_backward_out_cuda

- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: adaptive_max_pool3d_backward.grad_input

- func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  precomputed:
  - kernel_size -> int kH, int kW
  - stride -> int dH, int dW
  - padding -> int padH, int padW
  dispatch:
    CPU: avg_pool2d_out_cpu
    CUDA: avg_pool2d_out_cuda
    MPS: avg_pool2d_out_mps
    MkldnnCPU: mkldnn_avg_pool2d_out

- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
  python_module: nn
  structured_delegate: avg_pool2d.out
  dispatch:
    MkldnnCPU: mkldnn_avg_pool2d
    QuantizedCPU: avg_pool2d_quantized_cpu
  tags: core

- func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: avg_pool2d_backward_out_cpu
    CUDA: avg_pool2d_backward_out_cuda
    MPS: avg_pool2d_backward_out_mps
    MkldnnCPU: mkldnn_avg_pool2d_backward_out

- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
  python_module: nn
  structured_delegate: avg_pool2d_backward.grad_input
  dispatch:
    MkldnnCPU: mkldnn_avg_pool2d_backward
  tags: core

- func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: avg_pool3d_out_cpu
    CUDA: avg_pool3d_out_cuda
    MkldnnCPU: mkldnn_avg_pool3d_out

- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
  python_module: nn
  structured_delegate: avg_pool3d.out
  dispatch:
    MkldnnCPU: mkldnn_avg_pool3d
    QuantizedCPU: avg_pool3d_quantized_cpu

- func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: avg_pool3d_backward_out_cpu
    CUDA: avg_pool3d_backward_out_cuda
    MkldnnCPU: mkldnn_avg_pool3d_backward_out

- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
  python_module: nn
  structured_delegate: avg_pool3d_backward.grad_input
  dispatch:
    MkldnnCPU: mkldnn_avg_pool3d_backward

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: fractional_max_pool2d_out_cpu
    CUDA: fractional_max_pool2d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: fractional_max_pool2d.output

- func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: fractional_max_pool2d_backward_cpu
    CUDA: fractional_max_pool2d_backward_cuda

- func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: fractional_max_pool2d_backward.grad_input

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  precomputed:
  - kernel_size -> int poolSizeT, int poolSizeH, int poolSizeW
  - output_size -> int outputT, int outputH, int outputW
  - int numBatch, int numPlanes, int inputT, int inputH, int inputW
  dispatch:
    CPU: fractional_max_pool3d_out_cpu
    CUDA: fractional_max_pool3d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: fractional_max_pool3d.output

- func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: fractional_max_pool3d_backward_out_cpu
    CUDA: fractional_max_pool3d_backward_out_cuda

- func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
  python_module: nn
  dispatch:
    CPU: fractional_max_pool3d_backward_cpu
    CUDA: fractional_max_pool3d_backward_cuda

# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: max_pool2d_with_indices_out_cpu
    CUDA: max_pool2d_with_indices_out_cuda
    MPS: max_pool2d_with_indices_out_mps

# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: max_pool2d_with_indices.out
  tags: core

- func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: max_pool2d_with_indices_backward_out_cpu
    CUDA: max_pool2d_with_indices_backward_out_cuda
    MPS: max_pool2d_with_indices_backward_out_mps

- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: max_pool2d_with_indices_backward.grad_input
  tags: core

# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_out_cpu
    CUDA: max_pool3d_with_indices_out_cuda

# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_cpu
    CUDA: max_pool3d_with_indices_cuda
  tags: core

- func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_backward_out_cpu
    CUDA: max_pool3d_with_indices_backward_out_cuda

- func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_backward_cpu
    CUDA: max_pool3d_with_indices_backward_cuda

- func: max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: max_unpooling2d_forward_out_cpu
    CUDA: max_unpooling2d_forward_out_cuda

- func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
  python_module: nn
  dispatch:
    CPU: max_unpooling2d_forward_cpu
    CUDA: max_unpooling2d_forward_cuda

- func: max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: max_unpooling3d_forward_out_cpu
    CUDA: max_unpooling3d_forward_out_cuda

- func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: max_unpooling3d_forward_cpu
    CUDA: max_unpooling3d_forward_cuda

- func: reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad1d_out_cpu
    QuantizedCPU: reflection_pad1d_out_quantized_cpu
    CUDA: reflection_pad1d_out_cuda
    MPS: reflection_pad1d_out_mps

- func: reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad1d.out

- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad1d_backward_out_cpu
    CUDA: reflection_pad1d_backward_out_cuda
    MPS: reflection_pad1d_backward_out_mps

- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad1d_backward.grad_input

- func: reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, QuantizedCPU: reflection_pad2d_out_cpu
    CUDA: reflection_pad2d_out_cuda
    MPS: reflection_pad2d_out_mps

- func: reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: reflection_pad2d_cpu
    QuantizedCPU: reflection_pad2d_quantized_cpu
    CUDA: reflection_pad2d_cuda
    MPS: reflection_pad2d_mps
  tags: core

- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: reflection_pad2d_backward_out_cpu
    CUDA: reflection_pad2d_backward_out_cuda
    MPS: reflection_pad2d_backward_out_mps

- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: reflection_pad2d_backward_cpu
    CUDA: reflection_pad2d_backward_cuda
    MPS: reflection_pad2d_backward_mps

- func: reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad3d_out_cpu
    CUDA: reflection_pad3d_out_cuda
    MPS: reflection_pad3d_out_mps

- func: reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad3d.out

- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad3d_backward_out_cpu
    CUDA: reflection_pad3d_backward_out_cuda
    MPS: reflection_pad3d_backward_out_mps

- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad3d_backward.grad_input

- func: replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad1d_out_cpu
    CUDA: replication_pad1d_out_cuda
    MPS: replication_pad1d_out_mps

- func: replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad1d.out

- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad1d_backward_out_cpu
    CUDA: replication_pad1d_backward_out_cuda
    MPS: replication_pad1d_backward_out_mps

- func: replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad1d_backward.grad_input

- func: replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad2d_out_cpu
    CUDA: replication_pad2d_out_cuda
    MPS: replication_pad2d_out_mps

- func: replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad2d.out
  tags: core

- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: replication_pad2d_backward_out_cpu
    CUDA: replication_pad2d_backward_out_cuda
    MPS: replication_pad2d_backward_out_mps

- func: replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: replication_pad2d_backward_cpu
    CUDA: replication_pad2d_backward_cuda
    MPS: replication_pad2d_backward_mps

- func: replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad3d_out_cpu
    CUDA: replication_pad3d_out_cuda
    MPS: replication_pad3d_out_mps

- func: replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad3d.out
  tags: core

- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: replication_pad3d_backward_out_cpu
    CUDA: replication_pad3d_backward_out_cuda
    MPS: replication_pad3d_backward_out_mps

- func: replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: replication_pad3d_backward_cpu
    CUDA: replication_pad3d_backward_cuda
    MPS: replication_pad3d_backward_mps

- func: _pad_circular(Tensor self, SymInt[] pad) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: _pad_circular_symint

- func: _pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: _pad_enum_symint

- func: pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: pad_symint

- func: upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_linear1d.vec_out

- func: upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_bilinear2d.vec_out
  tags: core

- func: _upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_bilinear2d_aa.vec_out

- func: upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_trilinear3d.vec_out

- func: upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_bicubic2d.vec_out

- func: _upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_bicubic2d_aa.vec_out

- func: upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_nearest1d.vec_out

- func: _upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_nearest_exact1d.vec_out

- func: upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_nearest2d.vec_out
  tags: core

- func: _upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_nearest_exact2d.vec_out

- func: upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_nearest3d.vec_out

- func: _upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_nearest_exact3d.vec_out

# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility.
- func: upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_linear1d_out_cpu
    CUDA: upsample_linear1d_out_cuda

- func: upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_linear1d.out

- func: upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_linear1d_backward_out_cpu
    CUDA: upsample_linear1d_backward_out_cuda

- func: upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_linear1d_backward.grad_input

- func: upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bilinear2d_out_cpu
    CUDA: upsample_bilinear2d_out_cuda
    MPS: upsample_bilinear2d_out_mps

- func: upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bilinear2d.out
  dispatch:
    QuantizedCPU: upsample_bilinear2d_quantized_cpu

- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bilinear2d_backward_out_cpu
    CUDA: upsample_bilinear2d_backward_out_cuda
    MPS: upsample_bilinear2d_backward_out_mps

- func: upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bilinear2d_backward.grad_input

- func: _upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bilinear2d_aa_out_cpu
    CUDA: _upsample_bilinear2d_aa_out_cuda

- func: _upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bilinear2d_aa.out

- func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bilinear2d_aa_backward_out_cpu
    CUDA: _upsample_bilinear2d_aa_backward_out_cuda

- func: _upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bilinear2d_aa_backward.grad_input

- func: upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bicubic2d_out_cpu
    CUDA: upsample_bicubic2d_out_cuda

- func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bicubic2d.out

- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bicubic2d_backward_out_cpu
    CUDA: upsample_bicubic2d_backward_out_cuda

- func: upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bicubic2d_backward.grad_input

- func: _upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bicubic2d_aa_out_cpu
    CUDA: _upsample_bicubic2d_aa_out_cuda

- func: _upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bicubic2d_aa.out

- func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bicubic2d_aa_backward_out_cpu
    CUDA: _upsample_bicubic2d_aa_backward_out_cuda

- func: _upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bicubic2d_aa_backward.grad_input

- func: upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_trilinear3d_out_cpu
    CUDA: upsample_trilinear3d_out_cuda

- func: upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_trilinear3d.out

- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_trilinear3d_backward_out_cpu
    CUDA: upsample_trilinear3d_backward_out_cuda

- func: upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_trilinear3d_backward.grad_input

- func: upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest1d_out_cpu
    CUDA: upsample_nearest1d_out_cuda
    MPS: upsample_nearest1d_out_mps

- func: _upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact1d_out_cpu
    CUDA: _upsample_nearest_exact1d_out_cuda
    MPS: _upsample_nearest_exact1d_out_mps

- func: upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest1d.out

- func: _upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact1d.out

- func: upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest1d_backward_out_cpu
    CUDA: upsample_nearest1d_backward_out_cuda
    MPS: upsample_nearest1d_backward_out_mps

- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact1d_backward_out_cpu
    CUDA: _upsample_nearest_exact1d_backward_out_cuda
    MPS: _upsample_nearest_exact1d_backward_out_mps

- func: upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest1d_backward.grad_input

- func: _upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact1d_backward.grad_input

- func: upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest2d_out_cpu
    CUDA: upsample_nearest2d_out_cuda
    MPS: upsample_nearest2d_out_mps

- func: _upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact2d_out_cpu
    CUDA: _upsample_nearest_exact2d_out_cuda
    MPS: _upsample_nearest_exact2d_out_mps

- func: upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest2d.out
  dispatch:
    QuantizedCPU: upsample_nearest2d_quantized_cpu

- func: _upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact2d.out
  dispatch:
    QuantizedCPU: _upsample_nearest_exact2d_quantized_cpu

- func: upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest2d_backward_out_cpu
    CUDA: upsample_nearest2d_backward_out_cuda
    MPS: upsample_nearest2d_backward_out_mps

- func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact2d_backward_out_cpu
    CUDA: _upsample_nearest_exact2d_backward_out_cuda
    MPS: _upsample_nearest_exact2d_backward_out_mps

- func: upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest2d_backward.grad_input

- func: _upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact2d_backward.grad_input

- func: upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest3d_out_cpu
    CUDA: upsample_nearest3d_out_cuda

- func: _upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact3d_out_cpu
    CUDA: _upsample_nearest_exact3d_out_cuda

- func: upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest3d.out
  dispatch:
    QuantizedCPU: upsample_nearest3d_quantized_cpu

- func: _upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact3d.out
  dispatch:
    QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu

- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest3d_backward_out_cpu
    CUDA: upsample_nearest3d_backward_out_cuda

- func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact3d_backward_out_cpu
    CUDA: _upsample_nearest_exact3d_backward_out_cuda

- func: upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest3d_backward.grad_input

- func: _upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact3d_backward.grad_input

- func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sigmoid_backward_out
    MPS: sigmoid_backward_out_mps
  tags: pointwise

- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
  python_module: nn
  structured_delegate: sigmoid_backward.grad_input
  tags: pointwise
- - func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
- python_module: nn
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: logit_backward_out
- tags: pointwise
- - func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
- python_module: nn
- structured_delegate: logit_backward.grad_input
- tags: pointwise
- - func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
- python_module: nn
- structured: True
- structured_inherits: TensorIteratorBase
- dispatch:
- CPU, CUDA: tanh_backward_out
- MPS: tanh_backward_out_mps
- tags: pointwise
- - func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor
- python_module: nn
- structured_delegate: tanh_backward.grad_input
  tags: pointwise

# What's a thnn_conv_ versus a slow_conv_?
#
# Historically, we have inefficient implementations of convolutions
# coming from the THNN/THCUNN library. These convolutions typically
# operated by computing the Toeplitz matrix and then doing a matrix
# multiply with the input; this is very memory inefficient! However,
# occasionally we really don't have anything better, so it's helpful
# to have these fallbacks when there is no more optimized implementation
# in cudnn or mkldnn, etc. Both thnn_ and slow_ convolutions fall
# into this bucket.
#
# The difference between these two designations is that thnn_ refers
# to a convolution that is still written in the "legacy" style; that is,
# C code in the THNN/ or THCUNN/ directory. A slow_ convolution is
# one that is written in the native style: modern C++. Algorithmically,
# these are the same thing, but we give them different prefixes to
# make the operational distinction clear.
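#
# As an illustration of the Toeplitz-matrix strategy described above, here is
# a minimal Python sketch (not part of this file; torch.nn.functional.unfold
# is the im2col entry point, and the helper name slow_conv2d_via_im2col is
# made up for the example) computing a 2d convolution as one big matmul:
#
#   import torch
#   import torch.nn.functional as F
#
#   def slow_conv2d_via_im2col(x, weight, bias=None, stride=1, padding=0):
#       # x: (N, C_in, H, W); weight: (C_out, C_in, kH, kW)
#       N = x.shape[0]
#       C_out, _, kH, kW = weight.shape
#       # im2col: each receptive field becomes a column -> (N, C_in*kH*kW, L).
#       # Materializing this matrix is exactly the memory cost noted above.
#       cols = F.unfold(x, (kH, kW), stride=stride, padding=padding)
#       out = weight.reshape(C_out, -1) @ cols  # (N, C_out, L)
#       if bias is not None:
#           out = out + bias.reshape(1, C_out, 1)
#       H_out = (x.shape[2] + 2 * padding - kH) // stride + 1
#       W_out = (x.shape[3] + 2 * padding - kW) // stride + 1
#       return out.reshape(N, C_out, H_out, W_out)
#
#   # Sanity check against the optimized kernel:
#   # x, w = torch.randn(2, 3, 8, 8), torch.randn(4, 3, 3, 3)
#   # assert torch.allclose(slow_conv2d_via_im2col(x, w), F.conv2d(x, w), atol=1e-5)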
- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: slow_conv_transpose2d_structured_cpu
    CUDA: slow_conv_transpose2d_structured_cuda
- func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
  python_module: nn
  structured_delegate: slow_conv_transpose2d.out
- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_out_cpu
    CUDA: slow_conv_transpose3d_out_cuda
- func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_cpu
    CUDA: slow_conv_transpose3d_cuda
- func: thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
  python_module: nn
- func: _slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv2d_forward_out_cpu
    CUDA: slow_conv2d_forward_out_cuda
- func: _slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv2d_forward_cpu
    CUDA: slow_conv2d_forward_cuda
- func: _slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: slow_conv2d_backward_out_cpu
    CUDA: slow_conv2d_backward_out_cuda
- func: _slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  python_module: nn
  dispatch:
    CPU: slow_conv2d_backward_cpu
    CUDA: slow_conv2d_backward_cuda
  autogen: _slow_conv2d_backward.output_mask_out
- func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: nn
  dispatch:
    CUDA: conv_depthwise2d_cuda_out
- func: _conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
  python_module: nn
  dispatch:
    CUDA: conv_depthwise2d_cuda
- func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
  python_module: nn
  dispatch:
    CUDA: conv_depthwise3d_cuda
  autogen: conv_depthwise3d.out
- func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
- func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
  python_module: nn
- func: slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv3d_forward_out_cpu
- func: slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv3d_forward_cpu
- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated2d_cpu
    CUDA: slow_conv_dilated2d_cuda
  autogen: slow_conv_dilated2d.out
- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated3d_cpu
    CUDA: slow_conv_dilated3d_cuda
  autogen: slow_conv_dilated3d.out
- func: col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: col2im_out_cpu
    CUDA: col2im_out_cuda
- func: col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  python_module: nn
  dispatch:
    CPU: col2im_cpu
    CUDA: col2im_cuda
  tags: core
- func: column_stack(Tensor[] tensors) -> Tensor
- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: im2col_out_cpu
    CUDA: im2col_out_cuda
- func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  python_module: nn
  dispatch:
    CPU: im2col_cpu
    CUDA: im2col_cuda
- func: isfinite(Tensor self) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: isinf(Tensor self) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: isinf
    SparseCPU, SparseCUDA: isinf_sparse
    SparseMeta: isinf_sparse_meta
    SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr
  autogen: isinf.out
  tags: core
- func: record_stream(Tensor(a!) self, Stream s) -> ()
  variants: method
  dispatch:
    CUDA: record_stream_cuda
- func: isposinf(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: isposinf.out
  dispatch:
    SparseCPU, SparseCUDA: isposinf_sparse
    SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr
  tags: pointwise
- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: isposinf_out
    SparseCPU, SparseCUDA: isposinf_sparse_out
    SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr_out
  tags: pointwise
- func: isneginf(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: isneginf.out
  dispatch:
    SparseCPU, SparseCUDA: isneginf_sparse
    SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr
  tags: pointwise
- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: isneginf_out
    SparseCPU, SparseCUDA: isneginf_sparse_out
    SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr_out
  tags: pointwise

# NOTE [_add_batch_dim and _remove_batch_dim]
# _add_batch_dim and _remove_batch_dim are meant to be used in the implementation
# of the vmap frontend API (see torch/_vmap_internals.py). They are not
# user-facing, hence the leading underscore. Please don't use them anywhere else.
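#
# A rough sketch of the call pattern in the vmap frontend (illustrative only;
# _enter_vmap_level/_exit_vmap_level are hypothetical stand-ins for the level
# bookkeeping, which is elided here):
#
#   def vmap(func, in_dim=0, out_dim=0):
#       def wrapped(*args):
#           level = _enter_vmap_level()  # hypothetical helper
#           # Hide the batch dim: ops inside func then see per-example tensors.
#           batched = [torch._add_batch_dim(a, in_dim, level) for a in args]
#           out = func(*batched)
#           batch_size = args[0].size(in_dim)
#           # Rematerialize the batch dim on the way out.
#           out = torch._remove_batch_dim(out, level, batch_size, out_dim)
#           _exit_vmap_level()  # hypothetical helper
#           return out
#       return wrapped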
- func: _add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
  variants: function

# See NOTE [_add_batch_dim and _remove_batch_dim]
- func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
  variants: function

## Functions related to the `torch.special` namespace
# Note [special namespace binding]
# Functions in the special python module should have their names start with
# the "special_" prefix and be bound to the desired Python name in
# torch/special/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/special.h.
# The "special_" names should be hidden from the user and not documented.
- func: special_entr(Tensor self) -> Tensor
  structured_delegate: special_entr.out
  python_module: special
  variants: function
  tags: pointwise
- func: special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_entr_out
  tags: pointwise
- func: special_ndtri(Tensor self) -> Tensor
  structured_delegate: special_ndtri.out
  python_module: special
  variants: function
  tags: pointwise
- func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_ndtri_out
  tags: pointwise
- func: special_log_ndtr(Tensor self) -> Tensor
  structured_delegate: special_log_ndtr.out
  python_module: special
  variants: function
  tags: pointwise
- func: special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_log_ndtr_out
  tags: pointwise
- func: special_expm1(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_exp2(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_psi(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_digamma(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_gammaln(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_erf(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_erfc(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
- func: special_erfcx(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_erfcx.out
  tags: pointwise
- func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_erfcx_out
  tags: pointwise
- func: special_erfinv(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
- func: special_ndtr(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_xlog1py(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  structured_delegate: special_xlog1py.out
  tags: pointwise
- func: special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py
  tags: pointwise
- func: special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py
  tags: pointwise
- func: special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_xlog1py_out
  tags: pointwise
- func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py_out
  tags: pointwise
- func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py_out
  tags: pointwise
- func: special_xlogy(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
- func: special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
- func: special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
- func: special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
- func: special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
- func: special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
- func: special_zeta(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  structured_delegate: special_zeta.out
  tags: pointwise
- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta
  tags: pointwise
- func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta
  tags: pointwise
- func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_zeta_out
  tags: pointwise
- func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta_out
  tags: pointwise
- func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta_out
  tags: pointwise
- func: special_i0(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_i0e(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_i0e.out
  tags: pointwise
- func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_i0e_out
  tags: pointwise
- func: special_i1(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_i1.out
  tags: pointwise
- func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_i1_out
  tags: pointwise
- func: special_i1e(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_i1e.out
  tags: pointwise
- func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_i1e_out
  tags: pointwise
- func: special_logit(Tensor self, float? eps=None) -> Tensor
  python_module: special
  variants: function
- func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
- func: special_polygamma(int n, Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
- func: special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  python_module: special
  variants: function
- func: special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
- func: special_expit(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_sinc(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_round(Tensor self, *, int decimals=0) -> Tensor
  python_module: special
  variants: function
- func: special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_log1p(Tensor self) -> Tensor
  python_module: special
  variants: function
- func: special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  python_module: special
  variants: function
- func: special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_gammainc(Tensor self, Tensor other) -> Tensor
  python_module: special
  variants: function
- func: special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_gammaincc(Tensor self, Tensor other) -> Tensor
  python_module: special
  variants: function
- func: special_multigammaln(Tensor self, int p) -> Tensor
  python_module: special
  variants: function
- func: special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function
- func: special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  python_module: special
  variants: function

## Functions related to the fast Fourier transform and the torch.fft namespace
# Note [FFT namespace binding]
# Functions in the fft python module should have their names start with
# the "fft_" prefix and be bound to the desired Python name in
# torch/fft/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/fft.h.
# The "fft_" names should be hidden from the user and not documented.
#
# See fft_fft as an example.

# torch.fft.fft
# NOTE: NOT an alias for torch.fft, which has different semantics
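#
# For instance, under the module-level binding (a sketch):
#
#   import torch
#   x = torch.randn(8, dtype=torch.complex64)
#   X = torch.fft.fft(x)  # bound from fft_fft below
#   assert torch.allclose(torch.fft.ifft(X), x, atol=1e-6)
#
# Historically torch.fft was itself a function, which is why the note above
# stresses that torch.fft.fft is not an alias for it.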
- func: fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
- func: fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function
- func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_fftfreq
- func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_fftfreq_out
- func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_rfftfreq
- func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_rfftfreq_out
- func: fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
  python_module: fft
  variants: function
- func: fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
  python_module: fft
  variants: function

## Functions for linear algebra and the torch.linalg namespace
# Note [linalg namespace binding]
# Functions in the linalg python module should have their names start with
# "linalg_" and be bound to the desired Python name in
# torch/linalg/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/linalg.h.
# The "linalg_" names should be hidden from the user and not documented.
#
# See linalg_det as an example.

# "_ex" stands for experimental
- func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
  python_module: linalg
  structured_delegate: linalg_cholesky_ex.L
- func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_cholesky_ex_out
- func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
  python_module: linalg
- func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
  python_module: linalg
  variants: function
  structured_delegate: linalg_cross.out
  dispatch:
    ZeroTensor: linalg_cross_zerotensor
- func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA, MPS: linalg_cross_out

# linalg.lu_factor
- func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
  python_module: linalg
  variants: function
- func: linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
  python_module: linalg
  variants: function
- func: linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
  python_module: linalg
  structured_delegate: linalg_lu_factor_ex.out
  variants: function
- func: linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
  python_module: linalg
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: linalg_lu_factor_ex_out

# linalg.lu
- func: linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
  python_module: linalg
  structured_delegate: linalg_lu.out
  variants: function
- func: linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
  python_module: linalg
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: linalg_lu_out

# linalg.lu_solve
- func: linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
  python_module: linalg
  structured_delegate: linalg_lu_solve.out
  variants: function
- func: linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: linalg_lu_solve_out

# linalg.det
- func: _linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
  structured_delegate: _linalg_det.result
- func: _linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_det_out
- func: linalg_det(Tensor A) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

# torch.det, alias for torch.linalg.det
- func: det(Tensor self) -> Tensor
  variants: function, method
- func: linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
  structured_delegate: linalg_ldl_factor_ex.out
  python_module: linalg
  variants: function
- func: linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
  structured: True
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_ldl_factor_ex_out
- func: linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
  python_module: linalg
  variants: function
- func: linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
  python_module: linalg
  variants: function
- func: linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
  structured_delegate: linalg_ldl_solve.out
  python_module: linalg
  variants: function
- func: linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_ldl_solve_out
- func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
  python_module: linalg
  variants: function
  dispatch:
    CompositeExplicitAutograd: linalg_lstsq
  tags: dynamic_output_shape
- func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_lstsq_out
  tags: dynamic_output_shape

# torch.linalg.matmul, alias for torch.matmul
- func: linalg_matmul(Tensor self, Tensor other) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_matrix_exp(Tensor self) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_matrix_exp
  autogen: linalg_matrix_exp.out
- func: _linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
  structured_delegate: _linalg_slogdet.sign
- func: _linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_slogdet_out
- func: linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
  python_module: linalg
- func: linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
  python_module: linalg
- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
  variants: function, method
- func: slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
  variants: function
- func: logdet(Tensor self) -> Tensor
  variants: function, method
- func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_eig
- func: linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
  python_module: linalg
  dispatch:
    CPU, CUDA: linalg_eig_out
- func: linalg_eigvals(Tensor self) -> Tensor
  python_module: linalg
- func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

# This function exposes the `compute_v` flag, which is then used to implement `linalg.eigh` and
# `linalg.eigvalsh` as composite functions that call this one
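#
# Conceptually (a simplified sketch; the real composites live in ATen, and
# torch._linalg_eigh here stands for the private binding of the entry below):
#
#   def linalg_eigh(A, UPLO="L"):
#       return torch._linalg_eigh(A, UPLO, compute_v=True)
#
#   def linalg_eigvalsh(A, UPLO="L"):
#       eigenvalues, _ = torch._linalg_eigh(A, UPLO, compute_v=False)
#       return eigenvalues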
- func: _linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
  structured_delegate: _linalg_eigh.eigenvalues
- func: _linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_eigh_out
- func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
  python_module: linalg
- func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
  python_module: linalg
- func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
  python_module: linalg
- func: linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_householder_product
- func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  dispatch:
    CPU, CUDA: linalg_householder_product_out
- func: linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
  python_module: linalg
  structured_delegate: linalg_inv_ex.inverse
- func: linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_inv_ex_out
    MPS: linalg_inv_ex_out_mps
- func: linalg_inv(Tensor A) -> Tensor
  python_module: linalg
- func: linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: inverse(Tensor self) -> Tensor
  variants: function, method
- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: inner(Tensor self, Tensor other) -> Tensor
  variants: function, method
- func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: outer(Tensor self, Tensor vec2) -> Tensor
  variants: function, method
- func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)

# torch.ger, alias for torch.outer
- func: ger(Tensor self, Tensor vec2) -> Tensor
  variants: function, method
- func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
- func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
  variants: function
  structured_delegate: linalg_vector_norm.out
- func: linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_vector_norm_out
- func: linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
- func: linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
- func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

# This function exposes the `compute_uv` flag, which is then used to implement `linalg.svd` and
# `linalg.svdvals` as composite functions that call this one
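#
# Along the same lines as _linalg_eigh (simplified sketch; torch._linalg_svd
# stands for the private binding of the entry below):
#
#   def linalg_svd(A, full_matrices=True, driver=None):
#       return torch._linalg_svd(A, full_matrices, compute_uv=True, driver=driver)
#
#   def linalg_svdvals(A, driver=None):
#       _, S, _ = torch._linalg_svd(A, compute_uv=False, driver=driver)
#       return S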
- func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
  variants: function
  structured_delegate: _linalg_svd.U
- func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_svd_out
- func: linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
  python_module: linalg
  variants: function
- func: linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
  python_module: linalg
  variants: function
- func: linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_cond.p_str(Tensor self, str p) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    # calls svd, which calls mH() (view op)
    # also calls narrow()
    CompositeExplicitAutogradNonFunctional: linalg_pinv
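
# The gist of the SVD-based pseudoinverse referenced in the dispatch comment
# above, as a standalone sketch (purely illustrative; the rtol default here is
# a simplification of the real one):
#
#   import torch
#
#   def pinv_via_svd(A, rtol=None):
#       U, S, Vh = torch.linalg.svd(A, full_matrices=False)
#       if rtol is None:
#           rtol = max(A.shape[-2:]) * torch.finfo(A.dtype).eps
#       cutoff = rtol * S.max(dim=-1, keepdim=True).values
#       # Zero out singular values below the cutoff instead of dividing by them.
#       S_inv = torch.where(S > cutoff, S.reciprocal(), torch.zeros_like(S))
#       # mH is a conjugate-transpose view, which is the kind of view op the
#       # comment above refers to.
#       return Vh.mH @ (S_inv.unsqueeze(-1) * U.mH)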
- func: linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
  dispatch:
    CompositeExplicitAutograd: linalg_pinv_out
- func: linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
  cpp_no_default_args: ['atol', 'rtol']
  python_module: linalg
  variants: function
- func: linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  cpp_no_default_args: ['atol', 'rtol']
  python_module: linalg
  variants: function
- func: linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: _linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
  structured_delegate: _linalg_solve_ex.result
- func: _linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_solve_ex_out
- func: linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
  python_module: linalg
- func: linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
  python_module: linalg
- func: linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
  python_module: linalg
- func: linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
  python_module: linalg
  variants: function
  structured_delegate: linalg_qr.out
- func: linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_qr_out
- func: linalg_matrix_power(Tensor self, int n) -> Tensor
  python_module: linalg
- func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
- func: linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
  cpp_no_default_args: ['atol', 'rtol']
  python_module: linalg
  variants: function
- func: linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  cpp_no_default_args: ['atol', 'rtol']
  python_module: linalg
  variants: function
- func: linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function
- func: linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
- func: linalg_multi_dot(Tensor[] tensors) -> Tensor
  python_module: linalg
- func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

## Functions related to the `torch.nested` namespace
# Note [nested namespace binding]
# Functions in the nested python module should have their names start with
# the "nested_" prefix and be bound to the desired Python name in
# torch/nested/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/nested.h.
# The "nested_" names should be hidden from the user and not documented.
- func: nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
  python_module: nested
  variants: function

## Functions that are only for testing
# These are undocumented and should not be used outside of tests.
- func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor

# Note: this function is only for testing.
- func: _test_optional_intlist(Tensor values, int[]? addends) -> Tensor
  python_module: nn
  dispatch:
    CPU: _test_optional_intlist
  autogen: _test_optional_intlist.out

# Note: this function is only for testing.
- func: _test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
  python_module: nn
  dispatch:
    CPU: _test_optional_intlist
  autogen: _test_optional_filled_intlist.out

# Note: this function is only for testing.
- func: _test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
  python_module: nn
  dispatch:
    CPU: _test_optional_floatlist
  autogen: _test_optional_floatlist.out

# Note: this function is only for testing.
- func: _test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
  python_module: nn

# Note: this function is only for testing.
- func: _test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
  python_module: nn

# Note: this function is only for testing.
- func: _test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
  cpp_no_default_args: ['a', 'b']
  python_module: nn

# Note: this function is only for testing.
- func: _test_warn_in_autograd(Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: _test_warn_in_autograd
  autogen: _test_warn_in_autograd.out

# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
  dispatch:
    # the NestedTensor keys are necessary because NestedTensor has been removed
    # from the CompositeExplicitAutograd keyset; see Note [NestedTensor Not Included in Backend Keys]
- CompositeExplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_fullcoverage
- autogen: _test_autograd_multiple_dispatch.fullcoverage_out
- # Note: this function is only for testing.
- - func: _test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
- dispatch:
- CompositeImplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_ntonly
- # Note: this function is only for testing.
- - func: _test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
- dispatch:
- CompositeExplicitAutograd: _test_autograd_multiple_dispatch_view
- # Note: this function is only for testing.
- - func: _test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutogradNonFunctional: _test_autograd_multiple_dispatch_view_copy
- tags: view_copy
- autogen: _test_autograd_multiple_dispatch_view_copy.out
- - func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: segment_reduce_kernel
- autogen: segment_reduce.out
- - func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
- variants: function
- dispatch:
- CPU, CUDA: _segment_reduce_backward_kernel
- autogen: _segment_reduce_backward.out
- - func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
- python_module: nn
- variants: function
- - func: flatten_dense_tensors(Tensor[] tensors) -> Tensor
- variants: function
- python_module: nn
- - func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
- variants: function
- python_module: nn
- - func: _nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- variants: function
- dispatch:
- CompositeExplicitAutograd: _nested_tensor_from_tensor_list
- autogen: _nested_tensor_from_tensor_list.out
- func: _fw_primal_copy(Tensor self, int level) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _fw_primal_copy
  tags: view_copy
  autogen: _fw_primal_copy.out

- func: _make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _make_dual_copy
  tags: view_copy
  autogen: _make_dual_copy.out

- func: view_as_real_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_as_real_copy
  tags: view_copy
  autogen: view_as_real_copy.out

- func: view_as_complex_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_as_complex_copy
  tags: view_copy
  autogen: view_as_complex_copy.out

- func: _conj_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _conj_copy
  tags: view_copy
  autogen: _conj_copy.out

- func: _neg_view_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _neg_view_copy
  tags: view_copy
  autogen: _neg_view_copy.out

- func: as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: as_strided_copy_symint
  tags: view_copy
  autogen: as_strided_copy.out

- func: _sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _sparse_broadcast_to_copy
  tags: view_copy
  autogen: _sparse_broadcast_to_copy.out

- func: diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: diagonal_copy
  tags: view_copy
  autogen: diagonal_copy.out

- func: expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: expand_copy_symint
  tags: view_copy
  autogen: expand_copy.out

- func: permute_copy(Tensor self, int[] dims) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: permute_copy
  tags: view_copy
  autogen: permute_copy.out

- func: _reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _reshape_alias_copy_symint
  tags: view_copy
  autogen: _reshape_alias_copy.out

- func: select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: select_copy_symint
    SparseCsrCPU, SparseCsrCUDA: select_copy_sparse_csr
  tags: view_copy
  autogen: select_copy.int_out

- func: detach_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: detach_copy
  tags: view_copy
  autogen: detach_copy.out

- func: slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: slice_copy_Tensor_symint
  tags: view_copy
  autogen: slice_copy.Tensor_out

- func: split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: split_copy_Tensor_symint
  tags: view_copy

- func: split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: split_with_sizes_copy_symint
  tags: view_copy

- func: squeeze_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: squeeze_copy
  tags: view_copy
  autogen: squeeze_copy.out

- func: squeeze_copy.dim(Tensor self, int dim) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: squeeze_copy_dim
  tags: view_copy
  autogen: squeeze_copy.dim_out

- func: squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: squeeze_copy_dims
  tags: view_copy
  autogen: squeeze_copy.dims_out

- func: t_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: t_copy
  tags: view_copy
  autogen: t_copy.out

- func: transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: transpose_copy_int
  tags: view_copy
  autogen: transpose_copy.int_out

- func: unsqueeze_copy(Tensor self, int dim) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: unsqueeze_copy
  tags: view_copy
  autogen: unsqueeze_copy.out

- func: _indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _indices_copy
  tags: view_copy
  autogen: _indices_copy.out

- func: _values_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _values_copy
  tags: view_copy
  autogen: _values_copy.out

- func: indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: indices_copy
  tags: view_copy
  autogen: indices_copy.out

- func: values_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: values_copy
  tags: view_copy
  autogen: values_copy.out

- func: crow_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: crow_indices_copy
  tags: view_copy
  autogen: crow_indices_copy.out

- func: col_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: col_indices_copy
  tags: view_copy
  autogen: col_indices_copy.out

- func: ccol_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: ccol_indices_copy
  tags: view_copy
  autogen: ccol_indices_copy.out

- func: row_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: row_indices_copy
  tags: view_copy
  autogen: row_indices_copy.out

- func: unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: unbind_copy_int
  tags: view_copy

- func: unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
  variants: function
  dispatch:
    CompositeExplicitAutograd: unbind_copy_int_out

- func: split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
  variants: function
  dispatch:
    CompositeExplicitAutograd: split_copy_Tensor_out

- func: split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
  variants: function
  dispatch:
    CompositeExplicitAutograd: split_with_sizes_copy_out

- func: view_copy(Tensor self, SymInt[] size) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_copy_symint
  tags: view_copy
  autogen: view_copy.out

- func: view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_copy_dtype
  tags: view_copy
  autogen: view_copy.dtype_out

- func: unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: unfold_copy
  tags: view_copy
  autogen: unfold_copy.out

- func: alias_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: alias_copy
  tags: view_copy
  autogen: alias_copy.out

- func: to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
  variants: method
  dispatch:
    NestedTensorCPU: NestedTensor_to_padded_tensor_generic
    NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda
  autogen: to_padded_tensor.out
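# Usage sketch for to_padded_tensor, via the prototype torch.nested API (an
# assumption; the nested-tensor surface is still evolving):
#   import torch
#   nt = torch.nested.nested_tensor([torch.randn(2, 4), torch.randn(3, 4)])
#   padded = nt.to_padded_tensor(0.0)  # dense (2, 3, 4), ragged rows zero-padded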
- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
  dispatch:
    NestedTensorCPU: NestedTensor_softmax_dropout
    NestedTensorCUDA: NestedTensor_softmax_dropout_cuda

# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is.
- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward
  autogen: _transformer_encoder_layer_fwd.out

- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU, NestedTensorCPU: native_multi_head_attention_cpu
    CUDA, NestedTensorCUDA: native_multi_head_attention_cuda
  autogen: _native_multi_head_attention.out

- func: scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor
  python_module: nn
  variants: function
  autogen: scaled_dot_product_attention.out
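# Example: this op is exposed as torch.nn.functional.scaled_dot_product_attention
# and routes to the math, flash-attention, or memory-efficient kernels below.
#   import torch
#   import torch.nn.functional as F
#   q = torch.randn(2, 8, 128, 64)  # (batch, heads, seq_len, head_dim)
#   k = torch.randn(2, 8, 128, 64)
#   v = torch.randn(2, 8, 128, 64)
#   out = F.scaled_dot_product_attention(q, k, v, is_causal=True)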
# TODO: THIS NEEDS TO BE REMOVED BUT PEOPLE HAVE TRAINED THEIR MODELS WITH THIS OP BUILTIN
- func: _scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
  python_module: nn
  variants: function
  autogen: _scaled_dot_product_attention.out

# This aten function is kept so that we can test the choice function from Python
- func: _fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
  dispatch:
    Meta: _fused_sdp_choice_meta
    CPU, NestedTensorCPU: _fused_sdp_choice_cpp
    CUDA, NestedTensorCUDA: _fused_sdp_choice_cuda

- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
  variants: function
- func: _scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
  dispatch:
    CUDA: _scaled_dot_product_flash_attention_cuda
    NestedTensorCUDA: _scaled_dot_product_flash_attention_nestedtensor_cuda
- func: _scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
  variants: function
  dispatch:
    CUDA: _scaled_dot_product_flash_attention_backward_cuda

- func: _scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
  dispatch:
    CUDA: _scaled_dot_product_efficient_attention_cuda
    NestedTensorCUDA: _scaled_dot_product_efficient_attention_nestedtensor_cuda

- func: _scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: _scaled_dot_product_efficient_attention_backward_cuda

- func: _chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
  dispatch:
    CUDA: _chunk_grad_outputs_efficient_attention

- func: _flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
  variants: function
  dispatch:
    CUDA: _flash_attention_forward

- func: _flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: _flash_attention_backward
# Returns output, logsumexp if compute_log_sumexp
- func: _efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: _efficient_attention_forward

- func: _efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: _efficient_attention_backward

- func: _triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
  variants: function
  dispatch:
    CUDA: triton_scaled_dot_attention
  autogen: _triton_scaled_dot_attention.out

- func: _triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
  variants: function
  dispatch:
    CUDA: triton_multi_head_attention
  autogen: _triton_multi_head_attention.out

- func: special_airy_ai(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_airy_ai.out
  variants: function
  tags: pointwise

- func: special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_airy_ai_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise
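# Example for the Airy kernel above (binding name assumed to be the schema name
# with the special_ prefix dropped, as for the other torch.special ops):
#   import torch
#   x = torch.linspace(-2.0, 2.0, 5)
#   y = torch.special.airy_ai(x)  # Airy function Ai, elementwise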
- func: _transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_decoder_only_layer_forward
  autogen: _transformer_decoder_only_layer_fwd.out

- func: _native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: native_decoder_only_multi_head_attention
  autogen: _native_decoder_only_multi_head_attention.out

- func: special_bessel_j0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_j0.out
  variants: function
  tags: pointwise

- func: special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_j0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_bessel_j1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_j1.out
  variants: function
  tags: pointwise

- func: special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_j1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_bessel_y0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_y0.out
  variants: function
  tags: pointwise

- func: special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_y0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_bessel_y1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_y1.out
  variants: function
  tags: pointwise

- func: special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_y1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise
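# Example for the four Bessel kernels above (torch.special binding names
# assumed to drop the special_ prefix):
#   import torch
#   x = torch.linspace(0.1, 10.0, 100)
#   j0, j1 = torch.special.bessel_j0(x), torch.special.bessel_j1(x)
#   y0, y1 = torch.special.bessel_y0(x), torch.special.bessel_y1(x)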
- func: special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_t.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_t_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_t_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_u.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_u_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_u_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_v.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_v_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_v_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_w.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_w_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_w_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise
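# Example for the Chebyshev families above (T, U, V, W). The .n_scalar
# overloads let the order be a Python number rather than a tensor:
#   import torch
#   x = torch.linspace(-1.0, 1.0, 5)
#   t3 = torch.special.chebyshev_polynomial_t(x, 3)  # T_3(x), elementwise
#   u3 = torch.special.chebyshev_polynomial_u(x, 3)  # U_3(x)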
- func: special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_hermite_polynomial_h.out
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_hermite_polynomial_h_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_hermite_polynomial_h_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_hermite_polynomial_he.out
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_hermite_polynomial_he_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_hermite_polynomial_he_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_laguerre_polynomial_l.out
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_laguerre_polynomial_l_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_laguerre_polynomial_l_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_legendre_polynomial_p.out
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_legendre_polynomial_p_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_legendre_polynomial_p_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise
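# Example for the remaining classical orthogonal polynomials declared above:
#   import torch
#   x = torch.linspace(-1.0, 1.0, 5)
#   h2 = torch.special.hermite_polynomial_h(x, 2)    # physicists' H_2
#   he2 = torch.special.hermite_polynomial_he(x, 2)  # probabilists' He_2
#   l2 = torch.special.laguerre_polynomial_l(x, 2)
#   p2 = torch.special.legendre_polynomial_p(x, 2)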
- func: special_modified_bessel_i0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_i0.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_i0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_modified_bessel_i1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_i1.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_i1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_modified_bessel_k0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_k0.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_k0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_modified_bessel_k1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_k1.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_k1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k0(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_scaled_modified_bessel_k0.out
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_scaled_modified_bessel_k0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k1(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_scaled_modified_bessel_k1.out
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_scaled_modified_bessel_k1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise
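# Example: the scaled variants compute exp(x) * K_v(x), which stays finite at
# large x where K_v itself underflows:
#   import torch
#   x = torch.tensor([0.5, 5.0, 50.0])
#   k0 = torch.special.modified_bessel_k0(x)
#   k0e = torch.special.scaled_modified_bessel_k0(x)  # == exp(x) * K0(x)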
- func: special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_t.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_t_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_u.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_u_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_v.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_v_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_w.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_w_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_spherical_bessel_j0(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_spherical_bessel_j0.out
  variants: function
  tags: pointwise

- func: special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_spherical_bessel_j0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise
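# Example: spherical Bessel j0 is sin(x)/x, which gives a quick self-check:
#   import torch
#   x = torch.tensor([0.5, 1.0, 2.0])
#   torch.testing.assert_close(torch.special.spherical_bessel_j0(x),
#                              torch.sin(x) / x)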
# Aux function used in the test TestPythonDispatch.test_kwarg_only_and_positional_default
# within test/test_python_dispatch.py
- func: _foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
  dispatch:
    CPU: foobar
  autogen: _foobar.out

# Fused Optimizer CUDA kernels.
- func: _fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
  # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
  variants: function
  dispatch:
    CUDA: _fused_adam_kernel_cuda_
  autogen: _fused_adam, _fused_adam.out

- func: _fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
  # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
  variants: function
  dispatch:
    CUDA: _fused_adamw_kernel_cuda_
  autogen: _fused_adamw, _fused_adamw.out
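# Usage sketch: these kernels back the fused code path of the Python Adam/AdamW
# optimizers, reachable in recent releases via the fused=True constructor flag
# (CUDA-only here):
#   import torch
#   model = torch.nn.Linear(8, 8).cuda()
#   opt = torch.optim.Adam(model.parameters(), lr=1e-3, fused=True)
#   model(torch.randn(4, 8, device="cuda")).sum().backward()
#   opt.step()  # dispatches to _fused_adam_kernel_cuda_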
|