native_functions.yaml (533 KB, 14,308 lines; file content not shown)

43091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611
# See README.md in this directory for more guidance
# *********NB: _cast_* operators are DEPRECATED and will be removed
# eventually. These were previously used before TorchScript IR supported
# representing ScalarType's. They are now superseded by usage of
# `aten::to()`. The ops remain here for backward compatibility purposes.
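# A hedged illustration (not part of this schema): the replacement for the
# deprecated cast ops is `Tensor.to`, e.g.
#   import torch
#   x = torch.randn(3)
#   y = x.to(torch.float16)  # plays the role of the old _cast_Half(x)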
# DEPRECATED. DO NOT USE
- func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
  variants: function
# Computes the gradient of current tensor w.r.t. graph leaves.
- func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
  manual_cpp_binding: True
  variants: method
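# Hedged usage sketch (ordinary autograd; `Tensor.backward` is the Python
# entry point that routes through this binding):
#   import torch
#   x = torch.randn(3, requires_grad=True)
#   loss = (x * x).sum()
#   loss.backward()
#   print(x.grad)  # d(loss)/dx == 2 * x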
# DEPRECATED. Sets the tensor data held by this `Variable` to be the same as
# `new_data`. It requires that `new_data` and `Variable` have compatible tensor
# type, by checking `_has_compatible_shallow_copy_type(this, new_data)`.
#
# This function is deprecated because it doesn't really make sense in a world
# where Variables *are* Tensors (as opposed to them containing tensors, which
# is what the previous interpretation was.)
- func: set_data(Tensor(a!) self, Tensor new_data) -> ()
  manual_cpp_binding: True
  variants: method
- func: data(Tensor self) -> Tensor
  manual_cpp_binding: True
  variants: method
# True if this `Variable` is a leaf and thus does not have a `grad_fn`.
- func: is_leaf(Tensor self) -> bool
  manual_cpp_binding: True
  variants: method
# Returns the output index of this variable from the forward operation that
# produced it. Conversely, it returns the input index of the gradient `Node` to
# which this `Variable` is connected (because in the gradient computation,
# inputs and outputs switch meaning). For example:
#
#   y0, y1, y2 = f(x)
#   assert y0.output_nr == 0
#   assert y1.output_nr == 1
#   assert y2.output_nr == 2
#
- func: output_nr(Tensor self) -> int
  manual_cpp_binding: True
  variants: method
- func: _version(Tensor self) -> int
  manual_cpp_binding: True
  variants: method
- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
  manual_cpp_binding: True
  variants: method
# Enables .grad attribute for non-leaf Tensors.
- func: retain_grad(Tensor(a!) self) -> ()
  manual_cpp_binding: True
  variants: method
- func: retains_grad(Tensor self) -> bool
  manual_cpp_binding: True
  variants: method
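# Hedged illustration of retain_grad on a non-leaf tensor:
#   import torch
#   x = torch.randn(2, requires_grad=True)
#   y = x * 2            # non-leaf: would normally not populate .grad
#   y.retain_grad()
#   y.sum().backward()
#   assert y.retains_grad and y.grad is not None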
- func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
  variants: method
  dispatch:
    CompositeExplicitAutograd: _fw_primal
- func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
  variants: function
  dispatch:
    CompositeExplicitAutograd: _make_dual
- func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
  variants: function
# NOTE: [_new_zeros_with_same_feature_meta]
# This function creates a new tensor with the layout and TensorOptions
# of `other` but also takes into account the batch dimensions of `self`
#
# This function has a couple of extra constraints because it is also used for `jvp`
# in functorch.
# - is used for forward AD because there is the restriction
#   that the primal and tangent must have the same layout
# - We cannot assume that `self` and `other` have the same sizes or even dim
#   because in the inplace over view case, `other` is the base tensor, and
#   `self` is the forward grad with respect to the view, which can have an
#   entirely different shape
# - takes the number of batch dims for `self` because we also handle
#   some batching logic. We handle that here instead of a batching rule because
#   we'd like to avoid calling as_strided in the batching rule (as to enable
#   nested vmap in functorch).
# - needs to be CompositeExplicitAutograd for jvp support in functorch.
#   functorch currently relies on TensorWrapper, which does not have storage;
#   CompositeExplicitAutograd makes sure the TensorWrapper is unwrapped.
# - this function may eventually take on another int argument to store the
#   number of batch dims for `other` once we support that use case
- func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _new_zeros_with_same_feature_meta
  autogen: _new_zeros_with_same_feature_meta.out
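# Hedged sketch of the batch-dim semantics (internal op, assumed reachable
# via torch.ops.aten; subject to change):
#   import torch
#   self_t = torch.randn(5, 2, 3)   # one batch dim of size 5
#   other = torch.randn(2, 3)
#   out = torch.ops.aten._new_zeros_with_same_feature_meta(
#       self_t, other, self_num_batch_dims=1)
#   assert out.shape == (5, 2, 3)   # other's feature meta + self's batch dims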
# This function compares the storage numel of self with that of other, where
# storage numel is computed as: `other.storage().nbytes() / other.itemsize()`.
# We create this function for composite compliance purposes. The batching rule
# always returns true because vmapped as_strided does not support accessing
# storage locations not indexable by the input tensor.
# See the note above for more information.
- func: _has_same_storage_numel(Tensor self, Tensor other) -> bool
  variants: function
  dispatch:
    CompositeExplicitAutograd: _has_same_storage_numel
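# Hedged illustration of the storage-numel formula above:
#   import torch
#   t = torch.randn(4, 4)
#   storage_numel = t.untyped_storage().nbytes() // t.element_size()
#   assert storage_numel == t.numel() == 16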
- func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
  variants: method
  tags: inplace_view
- func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
  variants: method
- func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method
- func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
  variants: method
- func: align_as(Tensor self, Tensor other) -> Tensor
  variants: method
- func: align_tensors(Tensor[] tensors) -> Tensor[]
# Not assert because it's a keyword; not Assert because FX already
# took that syntax
# TODO: need to specify this is side-effectful somehow
- func: _assert_async(Tensor self) -> ()
  dispatch:
    CPU: _assert_async_cpu
    CUDA: _assert_async_cuda
- func: _assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method
- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
  device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
  dispatch:
    CUDA: _use_cudnn_ctc_loss
- func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
  device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
  dispatch:
    CUDA: _use_cudnn_ctc_loss_tensor
- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
  device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
  dispatch:
    CUDA: _cudnn_ctc_loss
  autogen: _cudnn_ctc_loss.out
- func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
  device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
  dispatch:
    CUDA: _cudnn_ctc_loss_tensor
- func: _use_cudnn_rnn_flatten_weight() -> bool
- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
  dispatch:
    CUDA: _cudnn_rnn_flatten_weight
  autogen: _cudnn_rnn_flatten_weight.out
- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
  # rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
  # Disable dispatch time device check for consistent behavior.
  device_check: NoCheck
  dispatch:
    CUDA: _cudnn_rnn
  autogen: _cudnn_rnn.out
- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
  dispatch:
    CUDA: _cudnn_rnn_backward
  autogen: _cudnn_rnn_backward.out
- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
  dispatch:
    CUDA: _cudnn_init_dropout_state
  autogen: _cudnn_init_dropout_state.out
- func: _debug_has_internal_overlap(Tensor self) -> int
  variants: function
- func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: fused_dropout_cuda
  tags: nondeterministic_seeded
  autogen: _fused_dropout.out
- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
  variants: function
  dispatch:
    CUDA: masked_scale_cuda
  autogen: _masked_scale.out
- func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: native_dropout_cpu
    CUDA: native_dropout_cuda
    NestedTensorCPU, NestedTensorCUDA: native_dropout_nested
  tags: [nondeterministic_seeded, core]
  autogen: native_dropout.out
- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
  dispatch:
    CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward
    CUDA: native_dropout_backward_cuda
  autogen: native_dropout_backward.out
  tags: pointwise
- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
- func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
- func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
- func: _shape_as_tensor(Tensor self) -> Tensor
- func: dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded
- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: feature_dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded
- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded
- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded
- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: abs(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: abs
    SparseCPU, SparseCUDA: abs_sparse
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr
  tags: [core, pointwise]
- func: abs_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: abs_
    SparseCPU, SparseCUDA: abs_sparse_
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_
- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: abs_out
    MPS: abs_out_mps
    SparseCPU, SparseCUDA: abs_sparse_out
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out
  tags: pointwise
# Note [Adding an alias]
# To add an alias do the following:
#
# 1) Copy the original function's native_functions.yaml entry, but replace the
#    original function's name with their own and delete any dispatch
#    keys for the aliases. Specifying a dispatch key will prevent
#    autograd from recording the operations the alias performs, which
#    will stop it from "inheriting" the original operation's autograd behavior.
# 2) Implement the corresponding functions and have them redispatch to the
#    original function.
# 3) Add docstrings to the new function that reference the original function,
#    and document the method as usual (if it exists.)
#    (See torch/_torch_docs.py and docs/source/torch.rst if adding a function,
#    torch/_tensor_docs.py and docs/source/tensors.rst if adding a method,
#    or module-specific doc bindings (like torch/linalg/__init__.py) if
#    adding an alias in a namespace.)
# 4) Update torch/overrides.py consistent with the original function.
# 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 6) Add aliases argument to existing OpInfo/UnaryUfuncInfo or create new OpInfo/UnaryUfuncInfo entry
#    in op_db list in torch/testing/_internal/common_methods_invocations.py
#
# See torch.absolute, an alias for torch.abs, as an example.
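# Hedged illustration of the alias relationship described above:
#   import torch
#   x = torch.randn(4)
#   assert torch.equal(torch.absolute(x), torch.abs(x))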
# Absolute, alias for abs
- func: absolute(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: absolute_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: angle(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: angle
    SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr
  tags: pointwise
- func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: angle_out
    SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr_out
  tags: pointwise
- func: view_as_real(Tensor(a) self) -> Tensor(a)
  variants: function
  dispatch:
    CPU, CUDA, MPS, Meta: view_as_real
- func: view_as_complex(Tensor(a) self) -> Tensor(a)
  variants: function
  dispatch:
    CPU, CUDA, Meta: view_as_complex
- func: sgn(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: sgn.out
  dispatch:
    SparseCPU, SparseCUDA: sgn_sparse
    SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr
  tags: pointwise
- func: sgn_(Tensor(a!) self) -> Tensor(a!)
  variants: method
  structured_delegate: sgn.out
  dispatch:
    SparseCPU, SparseCUDA: sgn_sparse_
    SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_
  tags: pointwise
- func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sgn_out
    SparseCPU, SparseCUDA: sgn_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_out
  tags: pointwise
- func: chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
  variants: method
- func: real(Tensor(a) self) -> Tensor(a)
  device_check: NoCheck # TensorIterator
  variants: function
- func: imag(Tensor(a) self) -> Tensor(a)
  device_check: NoCheck # TensorIterator
  variants: function
- func: _conj(Tensor(a) self) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: _conj
- func: conj(Tensor(a) self) -> Tensor(a)
  variants: function, method
  manual_cpp_binding: True
- func: _conj_physical(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: _conj_physical
    SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr
  autogen: _conj_physical.out
- func: conj_physical(Tensor self) -> Tensor
  variants: function, method
  tags: pointwise
- func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: conj_physical_out
    SparseCPU, SparseCUDA: conj_physical_out_sparse
    SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_out
  tags: pointwise
- func: conj_physical_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: conj_physical_
    SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_
  tags: pointwise
- func: resolve_conj(Tensor(a) self) -> Tensor(a)
  variants: function, method
- func: resolve_neg(Tensor(a) self) -> Tensor(a)
  variants: function, method
- func: _neg_view(Tensor(a) self) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: _neg_view
- func: acos(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: acos.out
  tags: [core, pointwise]
- func: acos_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: acos.out
  tags: pointwise
- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: acos_out
    MPS: acos_out_mps
  tags: pointwise
# arccos, alias of acos
- func: arccos(Tensor self) -> Tensor
  variants: function, method
- func: arccos_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
- func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
- func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: add.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: add_sparse
    SparseCsrCPU, SparseCsrCUDA: add_sparse_csr
    MkldnnCPU: mkldnn_add
    ZeroTensor: add_zerotensor
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_add_Tensor
  tags: [core, pointwise]
- func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: add.out
  dispatch:
    SparseCPU, SparseCUDA: add_sparse_
    SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_
    MkldnnCPU: mkldnn_add_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_add__Tensor
  tags: pointwise
- func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  ufunc_inner_loop:
    Generic: add (AllAndComplex, BFloat16, Half, ComplexHalf)
    ScalarOnly: add (Bool)
  dispatch:
    SparseCPU: add_out_sparse_cpu
    SparseCUDA: add_out_sparse_cuda
    SparseCsrCPU: add_out_sparse_csr_cpu
    SparseCsrCUDA: add_out_sparse_csr_cuda
    MkldnnCPU: mkldnn_add_out
    MPS: add_out_mps
  tags: pointwise
- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  variants: function
  dispatch:
    CPU: add_relu
- func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: add_relu_
- func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: add_relu_out
- func: _add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  variants: function
  dispatch:
    CPU: add_relu
- func: _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: add_relu_
  autogen: _add_relu.Scalar_out
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: add
  tags: [core, pointwise]
- func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: add_
  autogen: add.Scalar_out
  tags: pointwise
- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  structured_delegate: addmv.out
  variants: function, method
- func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  structured_delegate: addmv.out
  variants: function, method
- func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: addmv_out_cpu
    CUDA: addmv_out_cuda
    MPS: addmv_out_mps
    SparseCsrCPU: addmv_out_sparse_compressed
    SparseCsrCUDA: addmv_out_sparse_compressed_cuda
- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: addr
    MPS: addr_mps
    CompositeExplicitAutograd: math_addr
- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  dispatch:
    CompositeExplicitAutograd: addr_
- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: addr_out
    MPS: addr_out_mps
    CompositeExplicitAutograd: math_addr_out
- func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: affine_grid_generator
  autogen: affine_grid_generator.out
- func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
  variants: function
- func: _is_all_true(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: _is_all_true
- func: _is_any_true(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: _is_any_true
# Note: this function is only for testing.
- func: _test_check_tensor(Tensor self) -> Tensor
  variants: function
- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: all.out
  variants: function, method
- func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  precomputed:
  - dim -> int dim
  dispatch:
    CPU, CUDA: all_out
    MPS: all_out_mps
- func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
  variants: function, method
  tags: data_dependent_output
  dispatch:
    CompositeExplicitAutograd: allclose
- func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: any.out
  variants: function, method
- func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  precomputed:
  - dim -> int dim
  dispatch:
    CPU, CUDA: any_out
    MPS: any_out_mps
- func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: arange
- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: arange
# This operator should be named `arange.start_out` if following the naming convention. However that
# name is already taken. Disabled because of CI job failures.
# FIXME: enable this
#- func: arange.start_out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
#  dispatch:
#    CompositeExplicitAutograd: arange_start_out
- func: arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: arange
  cpp_no_default_args: ['step']
  tags: core
- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: arange_out
- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: arange_out
    CUDA: arange_cuda_out
    MPS: arange_mps_out
  cpp_no_default_args: ['step']
# This function is a temporary hack to allow tracing of arange like constructs with dynamic
# bounds on arange. Normal arange is not traceable because it does not take any tensor inputs;
# if the range you need is based on another tensor, calling this function directly will
# preserve tracing. Get rid of this when arange can directly take tensors for bounds
# (so that it can be traced directly).
- func: _dim_arange(Tensor like, int dim) -> Tensor
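# Hedged illustration of the tracing-friendly pattern described above:
#   import torch
#   like = torch.randn(2, 5)
#   idx = torch._dim_arange(like, 1)   # same values as torch.arange(like.size(1))
#   assert torch.equal(idx, torch.arange(5))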
- func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
  structured_delegate: argmax.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: core
- func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU, CUDA: argmax_out
    MPS: argmax_out_mps
- func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
  structured_delegate: argmin.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: core
- func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU, CUDA: argmin_out
    MPS: argmin_out_mps
- func: acosh(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: acosh.out
  tags: [core, pointwise]
- func: acosh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  structured_delegate: acosh.out
  tags: pointwise
- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: acosh_out
    MPS: acosh_out_mps
  tags: pointwise
# arccosh, alias for acosh
- func: arccosh(Tensor self) -> Tensor
  variants: function, method
- func: arccosh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: asinh(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: asinh.out
  dispatch:
    SparseCPU, SparseCUDA: asinh_sparse
    SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr
  tags: [core, pointwise]
- func: asinh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  structured_delegate: asinh.out
  dispatch:
    SparseCPU, SparseCUDA: asinh_sparse_
    SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_
  tags: pointwise
- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: asinh_out
    MPS: asinh_out_mps
    SparseCPU, SparseCUDA: asinh_sparse_out
    SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_out
  tags: pointwise
# arcsinh, alias for asinh
- func: arcsinh(Tensor self) -> Tensor
  variants: function, method
- func: arcsinh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: atanh(Tensor self) -> Tensor
  structured_delegate: atanh.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: atanh_sparse
    SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr
  tags: [core, pointwise]
- func: atanh_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: atanh.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: atanh_sparse_
    SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_
  tags: pointwise
- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: atanh_out
    MPS: atanh_out_mps
    SparseCPU, SparseCUDA: atanh_sparse_out
    SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_out
  tags: pointwise
# arctanh, alias for atanh
- func: arctanh(Tensor self) -> Tensor
  variants: function, method
- func: arctanh_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
  variants: function, method
  dispatch:
    ZeroTensor, CPU, CUDA: as_strided_tensorimpl
    Meta: as_strided_tensorimpl_meta_symint
    MPS: as_strided_tensorimpl_mps
    QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl
  device_check: NoCheck
  device_guard: False
  tags: core
- func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: function, method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutogradNonFunctional: as_strided__symint
- func: asin(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: asin.out
  dispatch:
    SparseCPU, SparseCUDA: asin_sparse
    SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr
  tags: [core, pointwise]
- func: asin_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: asin.out
  dispatch:
    SparseCPU, SparseCUDA: asin_sparse_
    SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_
  tags: pointwise
- func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: asin_out
    MPS: asin_out_mps
    SparseCPU, SparseCUDA: asin_sparse_out
    SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_out
  tags: pointwise
# arcsin, alias of asin
- func: arcsin(Tensor self) -> Tensor
  variants: function, method
- func: arcsin_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: atan(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: atan.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: atan_sparse
    SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr
  tags: [core, pointwise]
- func: atan_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: atan.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: atan_sparse_
    SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_
  tags: pointwise
- func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: atan_out
    MPS: atan_out_mps
    SparseCPU, SparseCUDA: atan_sparse_out
    SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_out
  tags: pointwise
# arctan, alias of atan
- func: arctan(Tensor self) -> Tensor
  variants: function, method
- func: arctan_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: atleast_1d(Tensor self) -> Tensor
  variants: function
- func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
- func: atleast_2d(Tensor self) -> Tensor
  variants: function
- func: atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
  variants: function
- func: atleast_3d(Tensor self) -> Tensor
  variants: function
- func: atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
  variants: function
- func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: function, method
  structured_delegate: baddbmm.out
- func: baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  structured_delegate: baddbmm.out
- func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU: baddbmm_out_cpu
    CUDA: baddbmm_out_cuda
    MPS: baddbmm_out_mps
    SparseCsrCUDA: baddbmm_out_sparse_csr_cuda
- func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: bartlett_window
  autogen: bartlett_window.out
- func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: bartlett_window
  autogen: bartlett_window.periodic_out
- func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
- func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
  dispatch:
    QuantizedCPU: quantized_batch_norm
  autogen: quantized_batch_norm.out
- func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
- func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
# Sample bernoulli with values in `self` as probability.
- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: bernoulli
  tags: nondeterministic_seeded
- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: bernoulli_out
    MPS: bernoulli_out_mps
- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: bernoulli_
    MPS: bernoulli_mps_
  autogen: bernoulli.Tensor, bernoulli.Tensor_out
- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: bernoulli_
    MPS: bernoulli_mps_
  autogen: bernoulli.float_out
# Note [bernoulli.p schema]
# We should probably just fix the overload ambiguity by appending a _functional to the C++ API name (BC breaking)
# This out-of-place version isn't used explicitly, but is needed by jit.
# There is no default value on `p` here because it would introduce ambiguity
# with the `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutogradNonFunctional: bernoulli
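# Hedged sketch; per the note above, the out-of-place `p` overload exists
# mainly for jit, while user code typically uses the documented in-place
# variant:
#   import torch
#   x = torch.empty(3, 4)
#   x.bernoulli_(0.25)   # each element sampled from Bernoulli(0.25)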
- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_cpu
    CUDA: binary_cross_entropy_cuda
    MPS: binary_cross_entropy_mps
- func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_out_cpu
    CUDA: binary_cross_entropy_out_cuda
    MPS: binary_cross_entropy_out_mps
- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_backward_cpu
    CUDA: binary_cross_entropy_backward_cuda
    MPS: binary_cross_entropy_backward_mps
- func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  variants: function
  dispatch:
    CPU: binary_cross_entropy_backward_out_cpu
    CUDA: binary_cross_entropy_backward_out_cuda
    MPS: binary_cross_entropy_backward_out_mps
- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: binary_cross_entropy_with_logits
  autogen: binary_cross_entropy_with_logits.out
- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
  variants: function, method
  dispatch:
    CPU: _bincount_cpu
    CUDA: _bincount_cuda
    MPS: _bincount_mps
  tags: dynamic_output_shape
  autogen: bincount.out
- func: bitwise_not(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: bitwise_not.out
  variants: function, method
  tags: [core, pointwise]
- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: bitwise_not.out
  variants: method
  tags: pointwise
- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: bitwise_not_out
  tags: pointwise
- func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: copysign_out
  tags: pointwise
- func: copysign.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: copysign.out
  tags: pointwise
- func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: copysign.out
- func: copysign.Scalar(Tensor self, Scalar other) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: copysign
  tags: pointwise
- func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method
  dispatch:
    CompositeExplicitAutograd: copysign_
- func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: copysign_out
  tags: pointwise
- func: logical_not(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: logical_not
  tags: [core, pointwise]
- func: logical_not_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: logical_not_
  tags: pointwise
- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: logical_not_out
    MPS: logical_not_out_mps
  tags: pointwise
- func: logical_xor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: logical_xor
  tags: pointwise
- func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: logical_xor_
  tags: pointwise
- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: logical_xor_out
    MPS: logical_xor_out_mps
  tags: pointwise
  978. - func: logical_and(Tensor self, Tensor other) -> Tensor
  979. device_check: NoCheck # TensorIterator
  980. variants: function, method
  981. dispatch:
  982. CompositeExplicitAutograd: logical_and
  983. tags: [core, pointwise]
  984. - func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  985. device_check: NoCheck # TensorIterator
  986. variants: method
  987. dispatch:
  988. CompositeExplicitAutograd: logical_and_
  989. tags: pointwise
  990. - func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  991. device_check: NoCheck # TensorIterator
  992. dispatch:
  993. CPU, CUDA: logical_and_out
  994. MPS: logical_and_out_mps
  995. tags: pointwise
  996. - func: logical_or(Tensor self, Tensor other) -> Tensor
  997. device_check: NoCheck # TensorIterator
  998. variants: function, method
  999. dispatch:
  1000. CompositeExplicitAutograd: logical_or
  1001. tags: [core, pointwise]
  1002. - func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  1003. device_check: NoCheck # TensorIterator
  1004. variants: method
  1005. dispatch:
  1006. CompositeExplicitAutograd: logical_or_
  1007. tags: pointwise
  1008. - func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  1009. device_check: NoCheck # TensorIterator
  1010. dispatch:
  1011. CPU, CUDA: logical_or_out
  1012. MPS: logical_or_out_mps
  1013. tags: pointwise
  1014. - func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  1015. dispatch:
  1016. CompositeExplicitAutograd: blackman_window
  1017. autogen: blackman_window.out
  1018. - func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  1019. dispatch:
  1020. CompositeExplicitAutograd: blackman_window
  1021. autogen: blackman_window.periodic_out
  1022. - func: bmm(Tensor self, Tensor mat2) -> Tensor
  1023. structured_delegate: bmm.out
  1024. variants: function, method
  1025. dispatch:
  1026. SparseCPU: bmm_sparse_cpu
  1027. SparseCUDA: bmm_sparse_cuda
  1028. NestedTensorCPU: bmm_nested
  1029. NestedTensorCUDA: bmm_nested_cuda
  1030. tags: core
  1031. - func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
  1032. structured: True
  1033. variants: function
  1034. dispatch:
  1035. CPU: bmm_out_cpu
  1036. CUDA: bmm_out_cuda
  1037. MPS: bmm_out_mps
  1038. SparseCPU: bmm_out_sparse_cpu
  1039. SparseCUDA: bmm_out_sparse_cuda
  1040. SparseCsrCUDA: bmm_out_sparse_csr_cuda
  1041. - func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
  1042. device_check: NoCheck
  1043. device_guard: False
  1044. - func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
  1045. variants: function, method
  1046. dispatch:
  1047. CompositeImplicitAutograd: broadcast_to_symint
  1048. - func: _sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
  1049. variants: function
  1050. dispatch:
  1051. SparseCPU, SparseCUDA: sparse_broadcast_to
  1052. - func: cat(Tensor[] tensors, int dim=0) -> Tensor
  1053. structured_delegate: cat.out
  1054. dispatch:
  1055. SparseCPU, SparseCUDA: cat_sparse
  1056. QuantizedCPU: cat_quantized_cpu
  1057. tags: core
  1058. - func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  1059. structured: True
  1060. precomputed:
  1061. - dim -> int dim, int valid, bool all_contiguous, bool all_same_dtype, bool all_same_sizes_and_stride, MemoryFormat memory_format
  1062. dispatch:
  1063. CPU: cat_out_cpu
  1064. CUDA: cat_out_cuda
  1065. MPS: cat_out_mps
  1066. QuantizedCPU: cat_out_quantized_cpu
  1067. - func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor
  1068. - func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
  1069. # alias for torch.cat
  1070. - func: concat(Tensor[] tensors, int dim=0) -> Tensor
  1071. - func: concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  1072. - func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor
  1073. - func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
  1074. # alias for torch.cat
  1075. - func: concatenate(Tensor[] tensors, int dim=0) -> Tensor
  1076. - func: concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  1077. - func: concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
  1078. - func: concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)

- func: block_diag(Tensor[] tensors) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: block_diag
  autogen: block_diag.out

- func: ceil(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: ceil.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: ceil_sparse
    SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr
  tags: pointwise

- func: ceil_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: ceil.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: ceil_sparse_
    SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_
  tags: pointwise

- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: ceil_out
    MPS: ceil_out_mps
    SparseCPU, SparseCUDA: ceil_sparse_out
    SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_out
  tags: pointwise

# alias for torch.linalg.multi_dot
- func: chain_matmul(Tensor[] matrices) -> Tensor
  variants: function

# alias for torch.linalg.multi_dot
- func: chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)

- func: unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: chunk
    NestedTensorCPU, NestedTensorCUDA: chunk_nested_tensor

- func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
  variants: function, method
  dispatch:
    CompositeImplicitAutograd: tensor_split_sections_symint

- func: tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
  variants: function, method
  dispatch:
    CompositeImplicitAutograd: tensor_split_indices_symint

- func: tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
  variants: function, method

- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ['min']
  structured_delegate: clamp.out
  dispatch:
    QuantizedCPU: clamp_quantized_cpu
  tags: [core, pointwise]

- func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
  variants: function, method
  structured_delegate: clamp.Tensor_out
  tags: pointwise

- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ['min']
  structured_delegate: clamp.out
  tags: pointwise

- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
  variants: function, method
  structured_delegate: clamp.Tensor_out
  tags: pointwise

- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ['min']
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_out
    MPS: clamp_out_mps
  tags: pointwise

- func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_Tensor_out
    MPS: clamp_Tensor_out_mps
  tags: pointwise

- func: clamp_max(Tensor self, Scalar max) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_max.out
  tags: pointwise

- func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
  variants: function, method
  structured_delegate: clamp_max.Tensor_out
  tags: pointwise

- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_max.out
  tags: pointwise

- func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
  variants: function, method
  structured_delegate: clamp_max.Tensor_out
  tags: pointwise

- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_max_out
    MPS: clamp_max_out_mps
  tags: pointwise

- func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_max_Tensor_out
    MPS: clamp_max_Tensor_out_mps
  tags: pointwise

- func: clamp_min(Tensor self, Scalar min) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_min.out
  tags: pointwise

- func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
  variants: function, method
  structured_delegate: clamp_min.Tensor_out
  tags: pointwise

- func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: clamp_min.out
  tags: pointwise

- func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
  variants: function, method
  structured_delegate: clamp_min.Tensor_out
  tags: pointwise

- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_min_out
    MPS: clamp_min_out_mps
  tags: pointwise

- func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: clamp_min_Tensor_out
    MPS: clamp_min_Tensor_out_mps
  tags: pointwise

# clip is an alias for clamp
- func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
  cpp_no_default_args: ['min']
  variants: function, method
  tags: pointwise

- func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
  variants: function, method
  tags: pointwise

- func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
  cpp_no_default_args: ['min']
  variants: function, method
  tags: pointwise

- func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
  variants: function, method
  tags: pointwise

- func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
  cpp_no_default_args: ['min']
  tags: pointwise

- func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
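
# An illustrative sketch (not part of the schema): clamp bounds values
# elementwise and clip is a pure alias; either bound may be omitted, but at
# least one must be given:
#
#   import torch
#   t = torch.tensor([-2.0, 0.5, 3.0])
#   torch.clamp(t, min=-1.0, max=1.0)  # tensor([-1.0000, 0.5000, 1.0000])
#   torch.clip(t, min=0.0)             # tensor([0.0000, 0.5000, 3.0000])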

- func: cudnn_is_acceptable(Tensor self) -> bool
  device_check: NoCheck
  device_guard: False

- func: complex(Tensor real, Tensor imag) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: complex

- func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: complex_out

- func: polar(Tensor abs, Tensor angle) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: polar

- func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: polar_out

- func: constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: constant_pad_nd
    MPS: constant_pad_nd_mps
  autogen: constant_pad_nd.out
  tags: core

- func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
  variants: method
  manual_cpp_binding: True

- func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
  dispatch:
    CompositeExplicitAutograd: convolution
  autogen: convolution.out
  tags: core

- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CompositeExplicitAutograd, CUDA: convolution_backward
  autogen: convolution_backward.out
  tags: core

- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
  dispatch:
    CompositeExplicitAutograd: convolution_overrideable
  autogen: convolution_overrideable.out

- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  dispatch:
    CompositeExplicitAutograd: convolution_backward_overrideable
  autogen: convolution_backward_overrideable.out

- func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _convolution
  autogen: _convolution.out

- func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor

- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor

- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

- func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor

- func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor

- func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor

- func: conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor
  cpp_no_default_args: ['bias', 'stride', 'padding']

- func: conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
  cpp_no_default_args: ['bias', 'stride', 'padding']

- func: conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
  cpp_no_default_args: ['bias', 'stride', 'padding']

- func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
  dispatch:
    CompositeExplicitAutograd: conv_tbc
  autogen: conv_tbc.out

- func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)

# NB: we inherit the goofy argument order from PyTorch torch.nn.functional
- func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor

- func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor

- func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
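
# An illustrative sketch (not part of the schema) of the argument order noted
# above: groups precedes dilation, unlike conv1d/conv2d/conv3d, so positional
# callers must be careful (keywords sidestep the issue):
#
#   import torch
#   x = torch.randn(1, 4, 8)   # (N, C_in, L)
#   w = torch.randn(4, 2, 3)   # (C_in, C_out // groups, kW)
#   y1 = torch.conv_transpose1d(x, w, None, 1, 0, 0, 2, 1)
#   #    positionally: bias, stride, padding, output_padding, groups, dilation
#   y2 = torch.conv_transpose1d(x, w, groups=2, dilation=1)
#   assert torch.equal(y1, y2)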

- func: copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: copy

- func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: copy_mkldnn_
    SparseCPU, SparseCUDA: copy_sparse_wrapper_
    CompositeExplicitAutograd: copy_
    SparseCsrCPU, SparseCsrCUDA: copy_sparse_compressed_
    NestedTensorCPU, NestedTensorCUDA: copy_nested_
  autogen: copy.out

- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
  dispatch:
    MPS: _copy_from_mps
  autogen: _copy_from.out

# We need this to be able to properly copy from a CPU to an XLA tensor with different sizes.
# See https://github.com/pytorch/xla/issues/2881
- func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
  dispatch:
    MPS: _copy_from_and_resize_mps
  autogen: _copy_from_and_resize.out

- func: cos(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cos.out
  tags: [core, pointwise]

- func: cos_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cos.out
  tags: pointwise

- func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: cos_out
    MPS: cos_out_mps
  tags: pointwise

- func: cosh(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cosh.out
  tags: [core, pointwise]

- func: cosh_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: cosh.out
  tags: pointwise

- func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: cosh_out
    MPS: cosh_out_mps
  tags: pointwise

- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor

- func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
  variants: function, method
  dispatch:
    CPU: count_nonzero_cpu
    CUDA: count_nonzero_cuda
    MPS: count_nonzero_mps
  autogen: count_nonzero.dim_IntList_out

- func: count_nonzero(Tensor self, int? dim=None) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: count_nonzero
  autogen: count_nonzero.out

- func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
  variants: function, method

- func: corrcoef(Tensor self) -> Tensor
  variants: function, method

- func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
  dispatch:
    CUDA: cudnn_affine_grid_generator_forward
  autogen: cudnn_affine_grid_generator.out

# TODO: Why do I have to call this grad?!
- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
  dispatch:
    CUDA: cudnn_affine_grid_generator_backward
  autogen: cudnn_affine_grid_generator_backward.out

- func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CUDA: cudnn_batch_norm
  autogen: cudnn_batch_norm.out

# NB: You can only use this if you used cudnn_batch_norm with training=True
- func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: cudnn_batch_norm_backward
  autogen: cudnn_batch_norm_backward.out

- func: cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
  dispatch:
    CUDA: cudnn_convolution
  autogen: cudnn_convolution.out

- func: cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_transpose
  autogen: cudnn_convolution_transpose.out

- func: _mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
  dispatch:
    MPS: _mps_convolution_transpose
  autogen: _mps_convolution_transpose.out

- func: mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
  dispatch:
    MPS: mps_convolution_transpose_backward
  autogen: mps_convolution_transpose_backward.out

- func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_relu
  autogen: cudnn_convolution_relu.out

- func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
  dispatch:
    CUDA: cudnn_convolution_add_relu
  autogen: cudnn_convolution_add_relu.out

# NB: input is special cased in a way I don't quite understand
- func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
  dispatch:
    CUDA: cudnn_grid_sampler_forward
  autogen: cudnn_grid_sampler.out

- func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
  dispatch:
    CUDA: cudnn_grid_sampler_backward
  autogen: cudnn_grid_sampler_backward.out

- func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: cummax

- func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: cummax_out

- func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator

- func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
  variants: function
  dispatch:
    CPU: cummax_helper_cpu
    CUDA: cummax_helper_cuda

- func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: cummin

- func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: cummin_out

- func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator

- func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
  variants: function
  dispatch:
    CPU: cummin_helper_cpu
    CUDA: cummin_helper_cuda

- func: cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: cumprod.out
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
  structured_delegate: cumprod.out
  variants: method

- func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: cumprod_out

- func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
  variants: method

- func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: cumsum.out
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
  structured_delegate: cumsum.out
  variants: method

- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: cumsum_out
    MPS: cumsum_out_mps

- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
  variants: method

- func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor

- func: cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor

- func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor

# convenience function that converts to intlists for you
- func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
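
# An illustrative sketch (not part of the schema): the public wrapper
# torch.nn.functional.ctc_loss accepts the lengths either as int lists or as
# tensors; the Tensor overload above backs the tensor case:
#
#   import torch
#   import torch.nn.functional as F
#   T, N, C, S = 50, 4, 20, 10
#   log_probs = torch.randn(T, N, C).log_softmax(2)
#   targets = torch.randint(1, C, (N, S), dtype=torch.long)
#   input_lengths = torch.full((N,), T, dtype=torch.long)
#   target_lengths = torch.randint(1, S + 1, (N,), dtype=torch.long)
#   loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)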

- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
  dispatch:
    CPU: ctc_loss_cpu
    CUDA: ctc_loss_gpu
  autogen: _ctc_loss.out
  tags: dynamic_output_shape # the shape of second output is data dependent

- func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
  dispatch:
    CPU, CUDA: ctc_loss_tensor
  autogen: _ctc_loss.Tensor_out
  tags: dynamic_output_shape # the shape of second output is data dependent

- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
  dispatch:
    CPU: ctc_loss_backward_cpu
    CUDA: ctc_loss_backward_gpu
  autogen: _ctc_loss_backward.out

- func: _ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
  dispatch:
    CPU, CUDA: ctc_loss_backward_tensor

- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutogradNonFunctional: diag_embed
  autogen: diag_embed.out

- func: diagflat(Tensor self, int offset=0) -> Tensor
  variants: function, method

- func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: diagonal

- func: linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
  python_module: linalg
  variants: function

- func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
  variants: function, method

- func: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: diagonal_backward_symint
  autogen: diagonal_backward.out

- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
  variants: method

- func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
  variants: function, method

- func: diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
  variants: function

- func: gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
  variants: function

- func: gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
  variants: function

- func: div.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: div.out
  dispatch:
    SparseCPU, SparseCUDA: div_sparse
    ZeroTensor: div_zerotensor
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Tensor
  tags: [core, pointwise]

- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: div.out
  dispatch:
    SparseCPU, SparseCUDA: div_sparse_
  tags: pointwise

- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: div_out
    MPS: div_out_mps
    SparseCPU, SparseCUDA: div_out_sparse_zerodim
  tags: pointwise

- func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: div.out_mode
  dispatch:
    SparseCPU, SparseCUDA: div_sparse
  tags: pointwise

- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: div.out_mode
  dispatch:
    SparseCPU, SparseCUDA: div_sparse_
  tags: pointwise

- func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: div_out_mode
    MPS: div_out_mode_mps
    SparseCPU, SparseCUDA: div_out_sparse_zerodim
  tags: pointwise

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: div
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Scalar
  tags: [core, pointwise]

- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: div_
  autogen: div.Scalar_out
  tags: pointwise

- func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: div
  tags: pointwise

- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
  variants: method
  dispatch:
    CompositeExplicitAutograd: div_
  autogen: div.Scalar_mode_out
  tags: pointwise
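
# An illustrative sketch (not part of the schema): rounding_mode selects
# between true division (None), C-style truncation ('trunc'), and floor
# division ('floor'):
#
#   import torch
#   a, b = torch.tensor([7.0, -7.0]), torch.tensor([2.0, 2.0])
#   torch.div(a, b)                         # tensor([ 3.5000, -3.5000])
#   torch.div(a, b, rounding_mode='trunc')  # tensor([ 3., -3.])
#   torch.div(a, b, rounding_mode='floor')  # tensor([ 3., -4.])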

# divide, alias for div
- func: divide.Tensor(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: divide.Scalar(Tensor self, Scalar other) -> Tensor
  variants: function, method

- func: divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
  variants: function, method

- func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
  variants: method

- func: divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)

- func: divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
  variants: function, method

- func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
  variants: method

# true_divide, an alias for div
- func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: pointwise

- func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: dot(Tensor self, Tensor tensor) -> Tensor
  variants: function, method
  dispatch:
    CPU: dot
    CUDA: dot_cuda
    MPS: dot_mps

- func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: dot_out

- func: vdot(Tensor self, Tensor other) -> Tensor
  variants: function, method
  dispatch:
    CPU: vdot
    CUDA: vdot_cuda

- func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: vdot_out

- func: einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor

- func: embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
  dispatch:
    CompositeExplicitAutograd: embedding_symint
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_embedding
  autogen: embedding.out

- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
  dispatch:
    CompositeImplicitAutograd: embedding_backward_symint

- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
  dispatch:
    CPU: embedding_dense_backward_cpu
    CUDA: embedding_dense_backward_cuda
    MPS: embedding_dense_backward_mps
  autogen: embedding_dense_backward.out
  tags: core

- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
  dispatch:
    CPU: embedding_renorm_cpu_
    CUDA: embedding_renorm_cuda_
  autogen: embedding_renorm, embedding_renorm.out

- func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor

# NOTE [ embedding_bag Native Functions ]
# The `_embedding_bag.*` variants assume that input tensors except for `weight`,
# e.g. `indices` and `offsets` (and `offset2bag`), are contiguous.
# We really only need to enforce this for `_embedding_bag` (the forward) because
# the backward inputs are the same as forward ones.
# The `embedding_bag` wrapper below is created to achieve this, e.g. by
# applying indices = indices.contiguous().
# The backward functions apply a check that these input tensors are contiguous.
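
# An illustrative sketch (not part of the schema): embedding_bag fuses an
# embedding lookup with a per-bag reduction, where offsets marks the start of
# each bag in the flat indices (the wrapper is also what makes indices
# contiguous, per the note above):
#
#   import torch
#   import torch.nn.functional as F
#   weight = torch.randn(10, 3)
#   indices = torch.tensor([1, 2, 4, 5, 4, 3])
#   offsets = torch.tensor([0, 3])  # two bags: indices[0:3] and indices[3:6]
#   out = F.embedding_bag(indices, weight, offsets, mode='sum')  # shape (2, 3)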

- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: _embedding_bag_forward_only_cpu
    CUDA: _embedding_bag_forward_only_cuda
  autogen: _embedding_bag_forward_only.out

- func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)

# row_stack is the alias of vstack
- func: row_stack(Tensor[] tensors) -> Tensor

- func: row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)

- func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)

# To keep backward and forward compatibility, and to avoid ambiguity with the
# original signature above, scale_grad_by_freq, mode, sparse,
# per_sample_weights, and include_last_offset parameters do not have default
# values. Once the original signature is removed, default values can be added.
- func: embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)

- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: _embedding_bag_cpu
    CUDA: _embedding_bag_cuda
  autogen: _embedding_bag.out

- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
  dispatch:
    CompositeImplicitAutograd: _embedding_bag_backward_symint

- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
  dispatch:
    CompositeImplicitAutograd: _embedding_bag_sparse_backward_symint

- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
  dispatch:
    CPU: _embedding_bag_dense_backward_cpu
    CUDA: _embedding_bag_dense_backward_cuda
  autogen: _embedding_bag_dense_backward.out

- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
  dispatch:
    CPU: _embedding_bag_per_sample_weights_backward_cpu
    CUDA: _embedding_bag_per_sample_weights_backward_cuda
  autogen: _embedding_bag_per_sample_weights_backward.out

- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: empty_names
  autogen: empty.names_out

- func: empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    CPU: empty_cpu
    CUDA: empty_cuda
    MPS: empty_mps
    Meta: empty_meta_symint
    MkldnnCPU: empty_mkldnn
    SparseCPU, SparseCUDA, SparseMeta: empty_sparse
    SparseCsrCPU, SparseCsrCUDA: empty_sparse_compressed
    QuantizedCPU, QuantizedCUDA, QuantizedMeta: empty_unknown_quantized

# We do not make new_empty a composite that calls into new_empty_strided, as the
# strided version is significantly more difficult for backends to implement
- func: new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    CompositeExplicitAutograd: new_empty_symint
  autogen: new_empty.out
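
# An illustrative sketch (not part of the schema): the new_* factory methods
# inherit dtype and device from self unless explicitly overridden:
#
#   import torch
#   base = torch.zeros(2, 2, dtype=torch.float64)
#   t = base.new_empty((3,))                     # float64, uninitialized
#   u = base.new_empty((3,), dtype=torch.int32)  # override just the dtype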

- func: new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    CompositeExplicitAutogradNonFunctional: new_empty_strided_symint
  autogen: new_empty_strided.out

- func: new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: new_full
  autogen: new_full.out

- func: new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: new_zeros
  autogen: new_zeros.out

- func: new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: method
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: new_ones
  autogen: new_ones.out

# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
  dispatch:
    CPU: empty_affine_quantized_other_backends_stub
    QuantizedCPU, QuantizedCUDA: empty_affine_quantized
  autogen: _empty_affine_quantized.out

# it's a factory function receiving a tensor argument, thus overriding explicitly
# other overrides are to provide a more helpful error message that dtype is required
- func: _empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
  category_override: factory
  dispatch:
    CPU: empty_per_channel_affine_quantized_other_backends_stub
    QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized
  autogen: _empty_per_channel_affine_quantized.out

- func: resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    Meta: resize__symint
    CPU: resize_
    CUDA: resize_cuda_
    MPS: resize_mps_
    QuantizedCPU: quantized_resize_cpu_
    SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_
  autogen: resize, resize.out

# This is a utility function to enable users to resize the out tensor while
# registering kernels for out variants. Eventually, we can consider exposing
# `resize_output` as a public API to ship it with python op registration
# to make it easy to register out variants for ops.
- func: _resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: function
  dispatch:
    Meta: _resize_output_
  autogen: _resize_output, _resize_output.out
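
# An illustrative sketch (not part of the schema) of out= resizing semantics:
# kernels resize the provided out tensor when its shape does not match the
# result (resizing a non-empty out tensor of the wrong shape is deprecated
# and raises a warning):
#
#   import torch
#   out = torch.empty(0)
#   torch.add(torch.ones(2, 2), torch.ones(2, 2), out=out)  # out becomes (2, 2)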

- func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  category_override: factory
  variants: function
  dispatch:
    QuantizedCPU, QuantizedCUDA: empty_quantized
  autogen: empty_quantized.out

- func: empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  device_guard: False

- func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: empty_like
    QuantizedCPU, QuantizedCUDA: empty_like_quantized
    SparseCPU, SparseCUDA, SparseMeta: empty_like_sparse_coo
    SparseCsrCPU, SparseCsrCUDA: empty_like_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: empty_like_nested
  autogen: empty_like.out

- func: empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: empty_strided_cpu
    CUDA: empty_strided_cuda
    MPS: empty_strided_mps
    Meta: empty_strided_meta_symint
    QuantizedCPU, QuantizedCUDA: empty_strided_unknown_quantized
  autogen: empty_strided.out
  tags: core

- func: erf(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: erf.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: erf_sparse
    SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr
  tags: [core, pointwise]

- func: erf_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: erf.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: erf_sparse_
    SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_
  tags: pointwise

- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: erf_out
    MPS: erf_out_mps
    SparseCPU, SparseCUDA: erf_sparse_out
    SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_out
  tags: pointwise

- func: erfc(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: erfc.out
  variants: function, method
  tags: pointwise

- func: erfc_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: erfc.out
  variants: function, method
  tags: pointwise

- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: erfc_out
  tags: pointwise

- func: exp(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: exp.out
  variants: function, method
  tags: [core, pointwise]

- func: exp_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: exp.out
  variants: function, method
  tags: pointwise

- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: exp_out
    MPS: exp_out_mps
  tags: pointwise

- func: exp2(Tensor self) -> Tensor
  structured_delegate: exp2.out
  variants: function, method
  tags: pointwise

- func: exp2_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: exp2.out
  variants: function, method
  tags: pointwise

- func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: exp2_out
    MPS: exp2_out_mps
  tags: pointwise

- func: expm1(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: expm1.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: expm1_sparse
    SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr
  tags: pointwise

- func: expm1_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: expm1.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: expm1_sparse_
    SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_
  tags: pointwise

- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: expm1_out
    MPS: expm1_out_mps
    SparseCPU, SparseCUDA: expm1_sparse_out
    SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out
  tags: pointwise

- func: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: expand
  tags: core

- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  device_check: NoCheck
  device_guard: False

# decomposes to eye.m
- func: eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: eye

- func: eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: eye

- func: eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: eye_out_cpu
    CUDA: eye_out_cuda
    MPS: eye_out_mps

- func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: eye_out_cpu
    CUDA: eye_out_cuda
    MPS: eye_out_mps
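
# An illustrative sketch (not part of the schema): the single-argument eye
# overload decomposes to eye.m with m = n, and eye.m also supports
# rectangular outputs:
#
#   import torch
#   assert torch.equal(torch.eye(3), torch.eye(3, 3))
#   torch.eye(2, 3)  # ones on the main diagonal of a 2x3 tensor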

- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
  variants: function, method

- func: flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
  variants: function, method

- func: flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
  variants: function, method

- func: flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
  variants: function, method

- func: unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
  variants: function, method

- func: unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
  variants: function, method

- func: fill.Scalar(Tensor self, Scalar value) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: fill
  tags: core

- func: fill.Tensor(Tensor self, Tensor value) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: fill

- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: fill_
    MPS: fill_scalar_mps
    QuantizedCPU, QuantizedCUDA: fill_quantized_
    Meta: fill_meta_
    SparseCsrCPU, SparseCsrCUDA: fill_sparse_csr_
    NestedTensorCPU, NestedTensorCUDA: fill_nested_
  autogen: fill.Scalar_out

- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: fill_
    MPS: fill_tensor_mps_
    QuantizedCPU, QuantizedCUDA: fill_quantized_
    Meta: fill_meta_
    NestedTensorCPU, NestedTensorCUDA: fill_nested_
  autogen: fill.Tensor_out

- func: floor(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: floor.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: floor_sparse
    SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr
  tags: [core, pointwise]

- func: floor_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: floor.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: floor_sparse_
    SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_
  tags: pointwise

- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: floor_out
    MPS: floor_out_mps
    SparseCPU, SparseCUDA: floor_sparse_out
    SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_out
  tags: pointwise

- func: floor_divide(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: floor_divide
    MPS: floor_divide_mps
    SparseCPU, SparseCUDA: floor_divide_sparse

- func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: floor_divide_
    MPS: floor_divide_mps_
    SparseCPU, SparseCUDA: floor_divide_sparse_

- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: floor_divide_out
    MPS: floor_divide_out_mps
    SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim

- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method

- func: frac(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: frac.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: frac_sparse
    SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr
  tags: pointwise

- func: frac_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: frac.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: frac_sparse_
    SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_
  tags: pointwise

- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: frac_out
    MPS: frac_out_mps
    SparseCPU, SparseCUDA: frac_sparse_out
    SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_out
  tags: pointwise

- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: full
  autogen: full.names_out

- func: full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: full
  tags: core

- func: full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: full_out

- func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: full_like
  autogen: full_like.out

- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: from_file
  autogen: from_file.out

- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: gcd_out
  tags: pointwise

- func: gcd(Tensor self, Tensor other) -> Tensor
  structured_delegate: gcd.out
  variants: function, method
  tags: pointwise

- func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: gcd.out
  variants: function, method

- func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lcm_out
  tags: pointwise

- func: lcm(Tensor self, Tensor other) -> Tensor
  structured_delegate: lcm.out
  variants: function, method
  tags: pointwise

- func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: lcm.out
  variants: function, method

# NOTE [ grid_sampler Native Functions ]
# `grid_sampler` is _supposed to_ do all the shape checking and then dispatch to
# one of `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of
# which has the corresponding backward defined as native functions as well.
# However, we do shape checking everywhere for now since each of the mentioned
# functions can be called directly, which will lead to crashes otherwise.
# See https://github.com/pytorch/pytorch/issues/73187 for more information.
#
# There is also _grid_sampler_2d_backward_cpu_fallback which is an
# implementation detail of grid_sampler_2d and is only exposed here for testing
  2228. # purposes.
  2229. #
  2230. # Additionally, arguments `padding_mode` and `interpolation_mode` are cast to
  2231. # enums defined in `native/GridSampler.h`. `cudnn_grid_sampler` doesn't take in
  2232. # `interpolation_mode` because it only supports Bilinear interpolation mode.
  2233. # Nor does it take in `align_corners` because it only supports the mode
  2234. # `align_corners = True`.
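
# A hedged usage sketch (not part of the schema): the public
# `torch.nn.functional.grid_sample` lowers to `grid_sampler`, with its string
# arguments mapped to the integer enums mentioned above (assumed mapping:
# interpolation 0=bilinear, 1=nearest, 2=bicubic; padding 0=zeros, 1=border,
# 2=reflection):
#
#   import torch
#   import torch.nn.functional as F
#   inp = torch.randn(1, 3, 8, 8)
#   grid = torch.rand(1, 4, 4, 2) * 2 - 1  # normalized coords in [-1, 1]
#   out = F.grid_sample(inp, grid, mode="bilinear", padding_mode="zeros",
#                       align_corners=False)
#   out2 = torch.ops.aten.grid_sampler(inp, grid, 0, 0, False)  # same call
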
- func: grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor

- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  dispatch:
    CPU, QuantizedCPU: grid_sampler_2d_cpu
    CUDA: grid_sampler_2d_cuda
    MPS: grid_sampler_2d_mps
  autogen: grid_sampler_2d.out
  tags: core

# `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
  dispatch:
    CPU: grid_sampler_2d_backward_cpu
    CUDA: grid_sampler_2d_backward_cuda
  autogen: grid_sampler_2d_backward.out
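
# A minimal sketch of the `output_mask` convention above, reusing the tensors
# from the forward sketch (dispatcher binding assumed reachable as
# torch.ops.aten.grid_sampler_2d_backward): output_mask[0] gates the input
# gradient, while the grid gradient is computed regardless.
#
#   grad_out = torch.ones_like(out)
#   g_input, g_grid = torch.ops.aten.grid_sampler_2d_backward(
#       grad_out, inp, grid, 0, 0, False, [inp.requires_grad, True])
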
# See NOTE [ grid_sample CPU fallback ]
- func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback
  autogen: _grid_sampler_2d_cpu_fallback.out

- func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)

- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
  dispatch:
    CPU: grid_sampler_3d_cpu
    CUDA: grid_sampler_3d_cuda
  autogen: grid_sampler_3d.out

# `grid_sampler_3d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
  dispatch:
    CPU: grid_sampler_3d_backward_cpu
    CUDA: grid_sampler_3d_backward_cuda
  autogen: grid_sampler_3d_backward.out

- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hann_window
  autogen: hann_window.out

- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hann_window
  autogen: hann_window.periodic_out

- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.out

- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.periodic_out

- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.periodic_alpha_out

- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: hamming_window
  autogen: hamming_window.periodic_alpha_beta_out

- func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: kaiser_window
  autogen: kaiser_window.out

- func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: kaiser_window
  autogen: kaiser_window.periodic_out

- func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: kaiser_window
  autogen: kaiser_window.beta_out

- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor

- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor

- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU, CUDA: native_group_norm
    CompositeExplicitAutograd: math_group_norm
  autogen: native_group_norm.out
  tags: core

- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU, CUDA: native_group_norm_backward
  autogen: native_group_norm_backward.out
  tags: core

# Real to complex forward FFT
- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
  variants: function
  dispatch:
    CPU: _fft_r2c_mkl
    CUDA: _fft_r2c_cufft

- func: _fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: _fft_r2c_mkl_out
    CUDA: _fft_r2c_cufft_out

# Complex to real inverse FFT
- func: _fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
  variants: function
  dispatch:
    CPU: _fft_c2r_mkl
    CUDA: _fft_c2r_cufft

- func: _fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: _fft_c2r_mkl_out
    CUDA: _fft_c2r_cufft_out

# Standard complex to complex FFT (forward or backward)
- func: _fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
  variants: function
  dispatch:
    CPU: _fft_c2c_mkl
    CUDA: _fft_c2c_cufft

- func: _fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CPU: _fft_c2c_mkl_out
    CUDA: _fft_c2c_cufft_out
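
# A hedged sketch tying these private FFT primitives to the public torch.fft
# API; the `normalization` enum values are an assumption here (0 = none,
# 1 = divide by sqrt(n), 2 = divide by n):
#
#   x = torch.randn(16)
#   spec = torch.ops.aten._fft_r2c(x, [0], 0, True)  # one-sided forward FFT
#   assert torch.allclose(spec, torch.fft.rfft(x))
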
- func: _validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
  device_check: NoCheck
  variants: function
  dispatch:
    CPU: _validate_compressed_sparse_indices_cpu
    CUDA: _validate_compressed_sparse_indices_cuda

- func: _cufft_get_plan_cache_size(int device_index) -> int

- func: _cufft_get_plan_cache_max_size(int device_index) -> int

- func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()

- func: _cufft_clear_plan_cache(int device_index) -> ()

- func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: index.Tensor_out
  variants: function, method
  dispatch:
    QuantizedCPU: quantized_index
  tags: dynamic_output_shape
  # NB: This function is special-cased in tools/autograd/gen_variable_type.py
  # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
  # - Tensor Tensor::index(ArrayRef<TensorIndex> indices)
  # - Tensor Tensor::index(std::initializer_list<TensorIndex> indices)

- func: index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  structured: True
  structured_inherits: TensorIteratorBase
  precomputed:
  - indices -> DimVector sizes, DimVector strides
  dispatch:
    CPU, CUDA, MPS: index_out
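
# A minimal sketch (not authoritative) of how Python advanced indexing maps
# onto `index.Tensor`; a None entry in `indices` leaves that dimension
# un-indexed:
#
#   x = torch.randn(4, 4)
#   rows, cols = torch.tensor([0, 2]), torch.tensor([1, 3])
#   assert torch.equal(x[rows, cols], torch.ops.aten.index(x, [rows, cols]))
#   assert torch.equal(x[:, cols], torch.ops.aten.index(x, [None, cols]))
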
- func: index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  precomputed:
  - dim -> int dim
  dispatch:
    CPU, CUDA: index_copy_out

- func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
  variants: method
  structured_delegate: index_copy.out

- func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
  variants: function, method
  structured_delegate: index_copy.out

- func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
  variants: method

- func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
  variants: function, method

- func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
  device_check: NoCheck # delegate to _index_put_impl_, which leverages TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: index_put_
  autogen: index_put.out
  # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
  # - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Tensor const & rhs)
  # - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Scalar v)
  # - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Tensor const & rhs)
  # - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Scalar v)

- func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
  device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: index_put
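
# A small sketch of the `accumulate` flag above: with accumulate=True,
# duplicate indices add into the destination instead of overwriting it.
#
#   x = torch.zeros(3, 3)
#   idx = (torch.tensor([0, 0]), torch.tensor([1, 1]))
#   x.index_put_(idx, torch.ones(2), accumulate=True)
#   # x[0, 1] is now 2.0; with accumulate=False it would be 1.0
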
- func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA, MPS: _index_put_impl_
    QuantizedCPU: _index_put_impl_quantized_cpu_
    QuantizedCUDA: _index_put_impl_quantized_cuda_
  autogen: _index_put_impl, _index_put_impl.out

- func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
  variants: function

- func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
  variants: function, method

- func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: isin_Tensor_Tensor_out

- func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
  variants: function
  structured_delegate: isin.Tensor_Tensor_out

- func: isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: isin_Tensor_Scalar_out

- func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
  variants: function
  structured_delegate: isin.Tensor_Scalar_out

- func: isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: isin_Scalar_Tensor_out

- func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
  variants: function
  structured_delegate: isin.Scalar_Tensor_out

- func: isnan(Tensor self) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, MPS: isnan
    SparseCPU, SparseCUDA: isnan_sparse
    SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr
  autogen: isnan.out
  tags: [core, pointwise]

- func: is_distributed(Tensor self) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: is_floating_point(Tensor self) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True

- func: is_complex(Tensor self) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True

- func: is_conj(Tensor self) -> bool
  variants: function, method
  device_guard: False
  manual_cpp_binding: True

- func: _is_zerotensor(Tensor self) -> bool
  variants: function, method
  device_guard: False
  manual_cpp_binding: True

- func: is_neg(Tensor self) -> bool
  variants: function, method
  device_guard: False
  manual_cpp_binding: True

- func: isreal(Tensor self) -> Tensor
  variants: function, method

- func: is_nonzero(Tensor self) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: is_same_size(Tensor self, Tensor other) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: nested_is_same_size
    CompositeExplicitAutograd: is_same_size

- func: is_signed(Tensor self) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True

- func: is_inference(Tensor self) -> bool
  variants: function, method
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True

- func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor

- func: kron(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: kthvalue

- func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  dispatch:
    CPU: kthvalue_out_cpu
    CUDA: kthvalue_out_cuda

- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method

- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
  dispatch:
    CompositeImplicitAutograd: layer_norm_symint

- func: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: layer_norm_cpu
    CUDA: layer_norm_cuda
    MPS: layer_norm_mps
    CompositeExplicitAutograd: math_native_layer_norm
    NestedTensorCPU, NestedTensorCUDA: nested_layer_norm
  autogen: native_layer_norm.out
  tags: core

- func: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: layer_norm_backward_cpu
    CUDA: layer_norm_backward_cuda
    MPS: layer_norm_backward_mps
  autogen: native_layer_norm_backward.out
  tags: core

- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: nan_to_num
    SparseCPU, SparseCUDA: nan_to_num_sparse
  tags: pointwise

- func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: nan_to_num_
    SparseCPU, SparseCUDA: nan_to_num_sparse_
  tags: pointwise

- func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: nan_to_num_out
    MPS: nan_to_num_out_mps
    SparseCPU, SparseCUDA: nan_to_num_sparse_out
  tags: pointwise

- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: linear
    NestedTensorCPU, NestedTensorCUDA: nested_linear
    MPS: _mps_linear

- func: linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: nested_linear_backward
    MPS: mps_linear_backward
  autogen: linear_backward.out

- func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: linear_out

- func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_linear
  autogen: mkldnn_linear.out

- func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_linear_backward_input
  autogen: mkldnn_linear_backward_input.out

- func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
  dispatch:
    MkldnnCPU: mkldnn_linear_backward_weights
  autogen: mkldnn_linear_backward_weights.out

- func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    MkldnnCPU: mkldnn_linear_backward
  autogen: mkldnn_linear_backward.out

- func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor

- func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor

- func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)

- func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor

- func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor

- func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor

- func: fbgemm_pack_quantized_matrix(Tensor input) -> Tensor

- func: fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor

- func: ldexp.Tensor(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: function, method
  tags: pointwise

- func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: linspace

- func: linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: linspace_out
    CUDA: linspace_cuda_out
    MPS: linspace_out_mps

- func: log(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: log.out
  variants: function, method
  tags: [core, pointwise]

- func: log_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: log.out
  variants: function, method
  tags: pointwise

- func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: log_out
    MPS: log_out_mps
  tags: pointwise

- func: log10(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: log10.out
  variants: function, method
  tags: pointwise

- func: log10_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: log10.out
  variants: function, method
  tags: pointwise

- func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: log10_out
    MPS: log10_out_mps
  tags: pointwise

- func: log1p(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: log1p.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: log1p_sparse
    SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr
  tags: pointwise

- func: log1p_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: log1p.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: log1p_sparse_
    SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_
  tags: pointwise

- func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: log1p_out
    MPS: log1p_out_mps
    SparseCPU, SparseCUDA: log1p_sparse_out
    SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_out
  tags: pointwise

- func: log2(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: log2.out
  variants: function, method
  tags: pointwise

- func: log2_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: log2.out
  variants: function, method
  tags: pointwise

- func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: log2_out
    MPS: log2_out_mps
  tags: pointwise

- func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: logaddexp_out
    MPS: logaddexp_out_mps
  tags: pointwise

- func: logaddexp(Tensor self, Tensor other) -> Tensor
  variants: method, function
  structured_delegate: logaddexp.out
  tags: pointwise

- func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: logaddexp2_out
    MPS: logaddexp2_out_mps
  tags: pointwise

- func: logaddexp2(Tensor self, Tensor other) -> Tensor
  variants: method, function
  structured_delegate: logaddexp2.out
  tags: pointwise

- func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: xlogy.OutTensor
  variants: function, method
  tags: pointwise

- func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: xlogy
  tags: pointwise

- func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: xlogy
  tags: pointwise
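
# A quick numerical note on the xlogy overloads above: the op computes
# `self * log(other)` with the convention `0 * log(0) == 0`.
#
#   torch.xlogy(torch.tensor(0.), torch.tensor(0.))  # tensor(0.)
#   torch.xlogy(torch.tensor(2.), torch.tensor(3.))  # 2 * ln(3)
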
# xlogy: inplace variant
- func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: xlogy.OutTensor
  tags: pointwise

- func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: xlogy_

# xlogy: out variant
- func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: xlogy_out
  tags: pointwise

- func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: xlogy_out
  tags: pointwise

- func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: xlogy_out
  tags: pointwise

- func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: logspace

- func: logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: logspace_out
    CUDA: logspace_cuda_out

# log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
- func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  variants: function, method
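
# A hedged example of the positional-dtype allowance noted above:
#
#   x = torch.randn(2, 5, dtype=torch.float16)
#   y = torch.log_softmax(x, 1, torch.float32)  # dtype passed positionally
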
- func: log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CompositeExplicitAutograd: log_softmax_out

- func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  variants: function, method

- func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  structured_delegate: _log_softmax.out
  tags: core

- func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: log_softmax_cpu_out
    CUDA: log_softmax_cuda_out
    MPS: log_softmax_mps_out

- func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
  structured_delegate: _log_softmax_backward_data.out

- func: _log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: log_softmax_backward_cpu_out
    CUDA: log_softmax_backward_cuda_out
    MPS: log_softmax_backward_mps_out

- func: _logcumsumexp(Tensor self, int dim) -> Tensor
  dispatch:
    CPU: _logcumsumexp_cpu
    CUDA: _logcumsumexp_cuda

- func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: _logcumsumexp_out_cpu
    CUDA: _logcumsumexp_out_cuda

- func: logcumsumexp(Tensor self, int dim) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: logcumsumexp

- func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: logcumsumexp_out

- func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
  variants: function, method

- func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)

- func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: logsumexp

- func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    # calls squeeze
    CompositeExplicitAutogradNonFunctional: logsumexp_out

- func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor

- func: matmul(Tensor self, Tensor other) -> Tensor
  variants: function, method
  dispatch:
    CompositeImplicitAutograd: matmul
    NestedTensorCPU, NestedTensorCUDA: matmul_nested

- func: matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: matmul_backward_nested
  autogen: matmul_backward.out

- func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeImplicitAutograd: matmul_out
    NestedTensorCPU, NestedTensorCUDA: matmul_out_nested

# Alias to linalg.matrix_power
- func: matrix_power(Tensor self, int n) -> Tensor
  variants: function, method

# Alias to linalg.matrix_power
- func: matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
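
# A one-line check of the alias (both spellings run the same composite):
#
#   A = torch.randn(3, 3)
#   assert torch.equal(torch.matrix_power(A, 2), torch.linalg.matrix_power(A, 2))
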
# Alias to linalg.matrix_exp
- func: matrix_exp(Tensor self) -> Tensor
  variants: function, method

# This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
- func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor

# DEPRECATED: Use torch.aminmax instead
- func: _aminmax(Tensor self) -> (Tensor, Tensor)
  dispatch:
    CPU, CUDA: _aminmax_all
  autogen: _aminmax.out

# DEPRECATED: Use torch.aminmax instead
- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
  dispatch:
    CPU, CUDA: _aminmax
  autogen: _aminmax.dim_out
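
# A hedged migration sketch for the deprecated `_aminmax` entries above:
#
#   x = torch.randn(3, 4)
#   mn, mx = torch.aminmax(x)         # replaces _aminmax (full reduction)
#   mn, mx = torch.aminmax(x, dim=1)  # replaces _aminmax.dim
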
- func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
  device_check: NoCheck # TensorIterator
  structured_delegate: aminmax.out
  variants: function, method

- func: aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
  device_check: NoCheck # TensorIterator
  structured: True
  dispatch:
    CPU, CUDA: aminmax_out

- func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
  dispatch:
    CPU, CUDA: _compute_linear_combination

- func: _compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: _compute_linear_combination_out

- func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  structured_delegate: max.dim_max
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: qmax
  tags: core

- func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  structured: True
  precomputed:
  - dim -> int dim
  dispatch:
    CPU, CUDA: max_out
    MPS: max_out_mps

- func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator

- func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: value_selecting_reduction_backward_symint

- func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
  variants: function, method
  structured_delegate: amax.out
  tags: core

- func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU, CUDA: amax_out
    MPS: amax_out_mps

# Return: (Tensor output, Tensor indices)
- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
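
# A short sketch of the indices contract noted above: the returned indices
# can be fed straight into max_unpool1d.
#
#   x = torch.randn(1, 1, 8)
#   out, idx = torch.nn.functional.max_pool1d(x, 2, return_indices=True)
#   rec = torch.nn.functional.max_unpool1d(out, idx, 2)
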
- func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor

- func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    CompositeImplicitAutograd: max_pool2d
    MPS: mps_max_pool2d

- func: max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    MPS: mps_max_pool2d_backward
  autogen: max_pool2d_backward.out

- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_max_pool2d
  autogen: mkldnn_max_pool2d.out

- func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_max_pool2d_backward
  autogen: mkldnn_max_pool2d_backward.out

- func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_max_pool3d
  autogen: mkldnn_max_pool3d.out

- func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_max_pool3d_backward
  autogen: mkldnn_max_pool3d_backward.out

- func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    QuantizedCPU: quantized_max_pool1d
  autogen: quantized_max_pool1d.out

- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
  dispatch:
    QuantizedCPU: quantized_max_pool2d
    QuantizedCUDA: quantized_max_pool2d_cudnn
  autogen: quantized_max_pool2d.out

- func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor

# The CPU and GPU dispatch variants are named weirdly here because otherwise there
# are namespacing issues in C++
- func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: mean

# For normal naming convention this should be `mean.out`. However since we already have `mean.out` we have to rename this.
# FIXME: fix CI jobs and re-enable this
#- func: mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
#  device_check: NoCheck # TensorIterator
#  dispatch:
#    CompositeExplicitAutograd: mean_dtype_out

- func: mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: mean.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    QuantizedCPU: mean_quantized_cpu
  tags: core

- func: mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: mean_out
    MPS: mean_out_mps
    QuantizedCPU: mean_out_quantized_cpu

- func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator

- func: nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # Composite
  variants: function, method

- func: nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # Composite

- func: median(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CPU: median_cpu
    CUDA: median_cuda
    MPS: median_mps
  autogen: median.out

- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: median

- func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  dispatch:
    CPU: median_out_cpu
    CUDA: median_out_cuda
    MPS: median_out_mps

- func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method

- func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: nanmedian(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CPU: nanmedian_cpu
    CUDA: nanmedian_cuda
  autogen: nanmedian.out

- func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: nanmedian

- func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  dispatch:
    CPU: nanmedian_out_cpu
    CUDA: nanmedian_out_cuda

- func: nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method

- func: nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  structured_delegate: min.dim_min
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: qmin
  tags: core

- func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator
  structured: True
  precomputed:
  - dim -> int dim
  dispatch:
    CPU, CUDA: min_out
    MPS: min_out_mps

- func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  device_check: NoCheck # TensorIterator
  variants: function, method

- func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
  device_check: NoCheck # TensorIterator

- func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
  variants: function, method
  structured_delegate: amin.out
  tags: core

- func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU, CUDA: amin_out
    MPS: amin_out_mps

# TODO: Add this function to MPS dispatch key so that we avoid declaring it in
# native_functions.yaml
# https://github.com/pytorch/pytorch/issues/77394
- func: _mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
  dispatch:
    MPS: _mps_convolution
  autogen: _mps_convolution.out

- func: mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    MPS: mps_convolution_backward
  autogen: mps_convolution_backward.out

- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
  dispatch:
    CompositeExplicitAutograd: mkldnn_convolution
  autogen: mkldnn_convolution.out

- func: mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: mkldnn_rnn_layer
  autogen: mkldnn_rnn_layer.out

- func: mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CPU: mkldnn_rnn_layer_backward
  autogen: mkldnn_rnn_layer_backward.out

- func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: miopen_batch_norm
  autogen: miopen_batch_norm.out

- func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: miopen_batch_norm_backward
  autogen: miopen_batch_norm_backward.out

- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  dispatch:
    CUDA: miopen_convolution
  autogen: miopen_convolution.out

- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  dispatch:
    CUDA: miopen_convolution_transpose
  autogen: miopen_convolution_transpose.out

- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
  dispatch:
    CUDA: miopen_depthwise_convolution
  autogen: miopen_depthwise_convolution.out

- func: miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
  dispatch:
    CUDA: miopen_convolution_relu

- func: miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
  dispatch:
    CUDA: miopen_convolution_add_relu

- func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CUDA: miopen_rnn
  autogen: miopen_rnn.out

- func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
  dispatch:
    CUDA: miopen_rnn_backward
  autogen: miopen_rnn_backward.out

- func: mm(Tensor self, Tensor mat2) -> Tensor
  structured_delegate: mm.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: _sparse_mm
    SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm
  tags: core

- func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: mm_out_cpu
    CUDA: mm_out_cuda
    MPS: mm_out_mps
    SparseCPU, SparseCUDA: _sparse_mm_out
    SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out

- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor
  python_module: sparse

- func: _sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
  python_module: sparse

- func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
  dispatch:
    SparseCPU: sparse_sparse_matmul_cpu
    SparseCUDA: sparse_sparse_matmul_cuda
  autogen: _sparse_sparse_matmul.out

- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
  dispatch:
    CPU, CUDA: mode

- func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  dispatch:
    CompositeExplicitAutograd: mode_out

- func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method

- func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)

- func: mul.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: mul.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: mul_sparse
    SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr
    MkldnnCPU: mkldnn_mul
    ZeroTensor: mul_zerotensor
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Tensor
  tags: [core, pointwise]

- func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: mul.out
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: mul_sparse_
    SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr_
    MkldnnCPU: mkldnn_mul_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Tensor
  tags: pointwise

- func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: mul_out
    MPS: mul_out_mps
    SparseCPU: mul_out_sparse_cpu
    SparseCUDA: mul_out_sparse_cuda
    SparseCsrCPU, SparseCsrCUDA: mul_out_sparse_csr
    MkldnnCPU: mkldnn_mul_out
  tags: pointwise

# For C++ only, until we have conversion from C++ numbers to Tensor
- func: mul.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: mul
    SparseCsrCPU, SparseCsrCUDA: mul_scalar_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Scalar
  tags: [core, pointwise]

- func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: mul_
    SparseCsrCPU, SparseCsrCUDA: mul__scalar_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Scalar
  autogen: mul.Scalar_out
  tags: pointwise

# multiply, alias for mul
- func: multiply.Tensor(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: multiply.Scalar(Tensor self, Scalar other) -> Tensor
  variants: function, method

- func: multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: mv(Tensor self, Tensor vec) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: mv
    SparseCPU, SparseCUDA: mv_sparse

- func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: mv_out

- func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: mvlgamma_out
  tags: pointwise

- func: mvlgamma(Tensor self, int p) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: mvlgamma
  tags: pointwise

- func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: mvlgamma_
  tags: pointwise

- func: narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
  variants: function, method
  dispatch:
    CPU: narrow_copy_dense_cpu
    SparseCPU, SparseCUDA: narrow_copy_sparse
    CompositeExplicitAutogradNonFunctional: narrow_copy_dense_symint
  tags: view_copy

- func: narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: narrow_copy_dense_cpu_out

- func: narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: narrow_symint

- func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: narrow_tensor_symint

- func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: batch_norm_cpu
    CUDA: batch_norm_cuda
    MPS: batch_norm_mps
    MkldnnCPU: mkldnn_batch_norm
  tags: core

- func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  dispatch:
    CUDA: batch_norm_cuda_out
    MPS: batch_norm_mps_out
    CPU: batch_norm_cpu_out

# TODO: In 2 weeks, we should make native_batch_norm composite implicit so that this correct schema percolates correctly through our dispatching
- func: _native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: _batch_norm_legit_cpu
    CUDA: _batch_norm_legit_cuda
    MPS: _batch_norm_legit_mps
    MkldnnCPU: _mkldnn_batch_norm_legit
  autogen: _native_batch_norm_legit_functional
  3274. - func: _native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
  3275. dispatch:
  3276. CPU: _batch_norm_legit_cpu_out
  3277. CUDA: _batch_norm_legit_cuda_out
  3278. MPS: _batch_norm_legit_mps_out
  3279. - func: _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
  3280. dispatch:
  3281. CPU: _batch_norm_legit_no_stats_cpu
  3282. CUDA: _batch_norm_legit_no_stats_cuda
  3283. MPS: _batch_norm_legit_no_stats_mps
  3284. MkldnnCPU: _mkldnn_batch_norm_legit_no_stats
  3285. tags: core
  3286. - func: _native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  3287. dispatch:
  3288. CPU: _batch_norm_legit_no_stats_cpu_out
  3289. CUDA: _batch_norm_legit_no_stats_cuda_out
  3290. MPS: _batch_norm_legit_no_stats_mps_out
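# Illustrative note (not part of the schema): unlike native_batch_norm, the
# "legit" variants declare running_mean/running_var as mutable (Tensor(a!)),
# so a training-mode call updates the stats in place. A minimal sketch,
# assuming an (N, C) input `x` with C == 1:
#
#   rm, rv = torch.zeros(1), torch.ones(1)
#   torch.ops.aten._native_batch_norm_legit(x, None, None, rm, rv, True, 0.1, 1e-5)
#   # rm and rv now hold the updated running statistics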
- func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
  dispatch:
    CUDA: batch_norm_stats_cuda
  autogen: batch_norm_stats.out
- func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
  dispatch:
    CUDA: batch_norm_elemt_cuda
- func: batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CUDA: batch_norm_elemt_cuda_out
# for backward compatibility
- func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
  dispatch:
    CUDA: batch_norm_gather_stats_cuda
  autogen: batch_norm_gather_stats.out
- func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
  dispatch:
    CUDA: batch_norm_gather_stats_with_counts_cuda
  autogen: batch_norm_gather_stats_with_counts.out
- func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU: batch_norm_backward_cpu
    CUDA: batch_norm_backward_cuda
    MPS: batch_norm_backward_mps
    MkldnnCPU: mkldnn_batch_norm_backward
  autogen: native_batch_norm_backward.out
- func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CUDA: batch_norm_backward_reduce_cuda
  autogen: batch_norm_backward_reduce.out
- func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
  dispatch:
    CUDA: batch_norm_backward_elemt_cuda
  autogen: batch_norm_backward_elemt.out
- func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
  dispatch:
    CPU: batch_norm_update_stats_cpu
    CUDA: batch_norm_update_stats_cuda
  autogen: batch_norm_update_stats.out
- func: is_vulkan_available() -> bool
- func: _nnpack_available() -> bool
- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _nnpack_spatial_convolution
  autogen: _nnpack_spatial_convolution.out
- func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: ones
  autogen: ones.names_out
- func: ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: ones
- func: ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: ones_out
- func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: ones_like
    NestedTensorCPU, NestedTensorCUDA: ones_like
  autogen: ones_like.out
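# Illustrative note (not part of the schema): the *_like factories read only
# the metadata of their input, e.g.
#
#   x = torch.empty(2, 3, dtype=torch.float16)
#   y = torch.ones_like(x)   # shape (2, 3), dtype float16, filled with ones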
- func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
- func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
- func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _euclidean_dist
  autogen: _euclidean_dist.out
- func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
  dispatch:
    CPU, CUDA: _cdist_forward
    MPS: _cdist_forward_mps
  autogen: _cdist_forward.out
- func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
  dispatch:
    CPU, CUDA: _cdist_backward
  autogen: _cdist_backward.out
- func: pdist(Tensor self, float p=2) -> Tensor
- func: _pdist_forward(Tensor self, float p=2) -> Tensor
  dispatch:
    CPU, CUDA: _pdist_forward
  autogen: _pdist_forward.out
- func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
  dispatch:
    CPU, CUDA: _pdist_backward
  autogen: _pdist_backward.out
- func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
  variants: function
- func: permute(Tensor(a) self, int[] dims) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: permute
    MPS: permute_mps
    SparseCPU, SparseCUDA: permute_sparse_coo
  tags: core
- func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
  variants: function, method
- func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
  variants: function, method
# moveaxis, alias for movedim
- func: moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
  variants: function, method
- func: moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
  variants: function, method
# Only exposed from C++ -- in Python,
# we expose it as an attribute `T`, not a function.
#
# I'd like to name this "T" in C++ too, but
# calling a native function "T" causes undefined
# behavior on Windows, for reasons I don't understand
# (maybe related to capital letter collation somehow...)
- func: numpy_T(Tensor(a) self) -> Tensor(a)
  variants: method
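# Illustrative note (not part of the schema): Python code uses the attribute
# while C++ calls the method directly, e.g. `x.T` in Python versus
# `x.numpy_T()` in C++.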
# Exposed on Python as an attribute 'H'
- func: matrix_H(Tensor(a) self) -> Tensor(a)
  variants: method
# Exposed on Python as an attribute 'mT'
- func: mT(Tensor(a) self) -> Tensor(a)
  variants: method
# Exposed on Python as an attribute 'mH'
- func: mH(Tensor(a) self) -> Tensor(a)
  variants: method
- func: adjoint(Tensor(a) self) -> Tensor(a)
  variants: function, method
- func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
  dispatch:
    CPU: pixel_shuffle_cpu
    CompositeExplicitAutogradNonFunctional: math_pixel_shuffle
  autogen: pixel_shuffle.out
- func: pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
  dispatch:
    CPU: pixel_unshuffle_cpu
    CompositeExplicitAutogradNonFunctional: math_pixel_unshuffle
  autogen: pixel_unshuffle.out
- func: channel_shuffle(Tensor self, int groups) -> Tensor
  dispatch:
    CPU: channel_shuffle
    QuantizedCPU: channel_shuffle_quantized_cpu
  autogen: channel_shuffle.out
- func: native_channel_shuffle(Tensor self, int groups) -> Tensor
  dispatch:
    CPU: channel_shuffle_cpu
    CompositeImplicitAutograd: math_channel_shuffle
- func: is_pinned(Tensor self, Device? device=None) -> bool
  variants: method
  dispatch:
    CUDA: is_pinned_cuda
    MPS: is_pinned_mps
    CompositeExplicitAutograd: is_pinned_default
# TODO: add a copy kwarg that guarantees that the tensor is put into fresh
# pinned memory
- func: pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
  variants: method
# Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor
- func: _pin_memory(Tensor self, Device? device=None) -> Tensor
  dispatch:
    CUDA: _pin_memory_cuda
    MPS: _pin_memory_mps
  autogen: _pin_memory.out
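# Illustrative note (not part of the schema): pin_memory may return `self`
# when the tensor is already pinned, whereas _pin_memory always produces a
# fresh, non-aliasing pinned tensor. A minimal sketch (CUDA build assumed):
#
#   x = torch.randn(4).pin_memory()
#   y = x.pin_memory()   # may alias x, since x is already pinned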
- func: pinverse(Tensor self, float rcond=1e-15) -> Tensor
  variants: function, method
- func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
  variants: function
- func: rad2deg(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: rad2deg
    SparseCPU, SparseCUDA: rad2deg_sparse
    SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr
- func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: rad2deg_
    SparseCPU, SparseCUDA: rad2deg_sparse_
    SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_
- func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: rad2deg_out
    SparseCPU, SparseCUDA: rad2deg_sparse_out
    SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_out
- func: deg2rad(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: deg2rad
    SparseCPU, SparseCUDA: deg2rad_sparse
    SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr
  tags: pointwise
- func: deg2rad_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: deg2rad_
    SparseCPU, SparseCUDA: deg2rad_sparse_
    SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_
  tags: pointwise
- func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: deg2rad_out
    SparseCPU, SparseCUDA: deg2rad_sparse_out
    SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_out
  tags: pointwise
- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: scalar_tensor
  autogen: scalar_tensor.out
  tags: core
- func: rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: rand
  autogen: rand.names_out
  tags: nondeterministic_seeded
- func: rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: rand
  autogen: rand.generator_with_names_out
- func: rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: rand
- func: rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: rand
- func: rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: rand_out
- func: rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: rand_like
  autogen: rand_like.out
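# Illustrative note (not part of the schema): the nondeterministic_seeded tag
# marks ops whose result depends on RNG state; the .generator overloads accept
# an explicit torch.Generator for reproducibility, e.g.
#
#   g = torch.Generator().manual_seed(0)
#   a = torch.rand(3, generator=g)   # routes to rand.generator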
- func: randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint
- func: randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint
- func: randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint
- func: randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint
- func: randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint_out
- func: randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint_out
- func: randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint_out
- func: randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randint_out
- func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: randint_like
  autogen: randint_like.out
- func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: randint_like
  autogen: randint_like.low_dtype_out
- func: randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randn
- func: randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randn
- func: randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: randn
  autogen: randn.names_out
- func: randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: randn
  autogen: randn.generator_with_names_out
- func: randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: randn_like
  autogen: randn_like.out
- func: randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randperm
- func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randperm
- func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: randperm_out
- func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CPU: randperm_out_cpu
    CUDA: randperm_out_cuda
    MPS: randperm_out_mps
- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: range
- func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: range
- func: range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: range_out_no_step
- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, Meta: range_out
    CUDA: range_cuda_out
    MPS: range_mps_out
  cpp_no_default_args: ['step']
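# Illustrative note (not part of the schema): cpp_no_default_args: ['step']
# means the generated C++ API must pass `step` explicitly even though the
# schema gives it a default; Python callers may still omit it, e.g.
#
#   torch.range(0, 4, out=t)   # step defaults to 1 in Python only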
- func: ravel(Tensor(a) self) -> Tensor(a)
  variants: function, method
- func: reciprocal(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: reciprocal.out
  variants: function, method
  tags: [core, pointwise]
- func: reciprocal_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: reciprocal.out
  variants: function, method
  tags: pointwise
- func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: reciprocal_out
    MPS: reciprocal_out_mps
  tags: pointwise
- func: neg(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: neg.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: neg_sparse
    SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg
  tags: [core, pointwise]
- func: neg_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: neg.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: neg_sparse_
    SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg_
  tags: pointwise
- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: neg_out
    MPS: neg_out_mps
    SparseCPU, SparseCUDA: neg_out_sparse
    SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_out
  tags: pointwise
# Alias for neg
- func: negative(Tensor self) -> Tensor
  variants: function, method
- func: negative_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: repeat(Tensor self, SymInt[] repeats) -> Tensor
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
  dispatch:
    CompositeExplicitAutograd: repeat
    MPS: repeat_mps
  autogen: repeat.out
  tags: core
- func: repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
  variants: function
  dispatch:
    CPU: repeat_interleave_cpu
    CUDA: repeat_interleave_cuda
    MPS: repeat_interleave_mps
  tags: dynamic_output_shape
  autogen: repeat_interleave.Tensor_out
- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
  variants: function, method
- func: repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
  variants: function, method
  dispatch:
    CompositeImplicitAutograd: repeat_interleave_symint
- func: reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: reshape_symint
    CompositeImplicitAutogradNestedTensor: reshape_nested
- func: _reshape_copy(Tensor self, SymInt[] size) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _reshape_copy_symint
# NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape.
# It is not user-facing, hence the leading underscore. Please don't use it
# anywhere else.
- func: _reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, ZeroTensor, MPS: _reshape_alias
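# Illustrative note (not part of the schema): reshape returns a view (via
# _reshape_alias) when the requested shape is compatible with the input's
# strides, and silently falls back to a copy otherwise, e.g.
#
#   v = torch.arange(6).reshape(2, 3)              # viewable: aliases the input
#   w = torch.arange(6).view(2, 3).t().reshape(6)  # non-viewable: copies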
# We don't need to support mkldnn since this is handled explicitly by the reshape operator.
- func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    MkldnnCPU: mkldnn_reshape
  autogen: _mkldnn_reshape.out
- func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: reshape_as
    CompositeImplicitAutogradNestedTensor: reshape_as_nested
- func: round(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: round.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: round_sparse
    SparseCsrCPU, SparseCsrCUDA: round_sparse_csr
  tags: pointwise
- func: round_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: round.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: round_sparse_
    SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_
  tags: pointwise
- func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU: round_out
    CUDA: round_out
    MPS: round_out_mps
    SparseCPU, SparseCUDA: round_sparse_out
    SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_out
  tags: pointwise
- func: round.decimals(Tensor self, *, int decimals) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: round.decimals_out
  variants: function, method
  tags: pointwise
- func: round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: round.decimals_out
  variants: function, method
  tags: pointwise
- func: round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU: round_decimals_out
    CUDA: round_decimals_out
  tags: pointwise
- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
- func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
  tags: nondeterministic_seeded
  device_check: NoCheck # TensorIterator
- func: relu(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: relu
    MPS: relu_mps
    MkldnnCPU: mkldnn_relu
    QuantizedCPU: relu_quantized_cpu
    QuantizedCUDA: relu_quantized_cuda
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu
    SparseCPU, SparseCUDA: relu_sparse
    SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr
  tags: [core, pointwise]
- func: relu_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: relu_
    MPS: relu_mps_
    MkldnnCPU: mkldnn_relu_
    QuantizedCPU: relu_quantized_cpu_
    QuantizedCUDA: relu_quantized_cuda_
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu_
    SparseCPU, SparseCUDA: relu_sparse_
    SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr_
  autogen: relu.out
  tags: pointwise
- func: relu6(Tensor self) -> Tensor
  python_module: nn
- func: relu6_(Tensor(a!) self) -> Tensor(a!)
  python_module: nn
- func: prelu(Tensor self, Tensor weight) -> Tensor
  variants: function, method
  autogen: prelu.out
- func: _prelu_kernel(Tensor self, Tensor weight) -> Tensor
  dispatch:
    CPU, CUDA: _prelu_kernel
    QuantizedCPU: _prelu_kernel_quantized_cpu
    MkldnnCPU: mkldnn_prelu
    MPS: prelu_mps
- func: _prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
  dispatch:
    CPU, CUDA: _prelu_kernel_backward
    MkldnnCPU: mkldnn_prelu_backward
    MPS: prelu_backward_mps
- func: gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    CPU: gelu_out_cpu
    CUDA: gelu_out_cuda
    MPS: gelu_out_mps
- func: gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
  structured_delegate: gelu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu_
- func: gelu(Tensor self, *, str approximate='none') -> Tensor
  structured_delegate: gelu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_gelu
    QuantizedCPU: gelu_quantized_cpu
    QuantizedCUDA: gelu_quantized_cuda
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu
  tags: [core, pointwise]
- func: gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU: gelu_backward_out_cpu
    CUDA: gelu_backward_out_cuda
    MPS: gelu_backward_out_mps
- func: gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
  structured_delegate: gelu_backward.grad_input
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_gelu_backward
    NestedTensorCPU, NestedTensorCUDA: gelu_backwards_nested
  tags: pointwise
- func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
  variants: function
  python_module: nn
  device_check: NoCheck
  device_guard: False
- func: hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: hardshrink_out
- func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
  structured_delegate: hardshrink.out
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: hardshrink_backward_out
- func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
  structured_delegate: hardshrink_backward.grad_input
  variants: function, method
- func: rsqrt(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: rsqrt.out
  variants: function, method
  tags: [core, pointwise]
- func: rsqrt_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: rsqrt.out
  variants: function, method
  tags: pointwise
- func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: rsqrt_out
    MPS: rsqrt_out_mps
  tags: pointwise
- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: select_symint
    SparseCsrCPU, SparseCsrCUDA: select_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: select_nested
  tags: core
- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutogradNonFunctional: select_backward_symint
  autogen: select_backward.out
- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_select_backward_symint
- func: selu(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
- func: selu_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: celu(Tensor self, Scalar alpha=1.0) -> Tensor
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: celu
- func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CompositeExplicitAutograd: celu_
  autogen: celu.out
- func: silu(Tensor self) -> Tensor
  structured_delegate: silu.out
  python_module: nn
- func: silu_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: silu.out
  python_module: nn
- func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: silu_out
    MPS: silu_out_mps
- func: silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: silu_backward_out
    MPS: silu_backward_out_mps
- func: silu_backward(Tensor grad_output, Tensor self) -> Tensor
  structured_delegate: silu_backward.grad_input
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: math_silu_backward
- func: mish(Tensor self) -> Tensor
  structured_delegate: mish.out
  python_module: nn
- func: mish_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: mish.out
  python_module: nn
- func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: mish_out
- func: mish_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: mish_backward
    CompositeImplicitAutograd: math_mish_backward
- func: sigmoid(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sigmoid.out
  variants: function, method
  dispatch:
    QuantizedCPU: sigmoid_quantized_cpu
    MkldnnCPU: mkldnn_sigmoid
  tags: [core, pointwise]
- func: sigmoid_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sigmoid.out
  variants: function, method
  dispatch:
    MkldnnCPU: mkldnn_sigmoid_
  tags: pointwise
- func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sigmoid_out
    MPS: sigmoid_out_mps
  tags: pointwise
- func: logit(Tensor self, float? eps=None) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: logit
  tags: pointwise
- func: logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
  variants: function, method
  dispatch:
    CPU, CUDA: logit_
  tags: pointwise
- func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: logit_out
  tags: pointwise
- func: sin(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sin.out
  variants: function, method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr
    SparseCPU, SparseCUDA: sin_sparse
  tags: [core, pointwise]
- func: sin_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sin.out
  variants: function, method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_
    SparseCPU, SparseCUDA: sin_sparse_
  tags: pointwise
- func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sin_out
    MPS: sin_out_mps
    SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_out
    SparseCPU, SparseCUDA: sin_sparse_out
  tags: pointwise
- func: sinc(Tensor self) -> Tensor
  structured_delegate: sinc.out
  variants: function, method
  tags: pointwise
- func: sinc_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: sinc.out
  variants: function, method
  tags: pointwise
- func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sinc_out
  tags: pointwise
- func: sinh(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sinh.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sinh_sparse
    SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr
  tags: [core, pointwise]
- func: sinh_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sinh.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sinh_sparse_
    SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_
  tags: pointwise
- func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sinh_out
    MPS: sinh_out_mps
    SparseCPU, SparseCUDA: sinh_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_out
  tags: pointwise
# Returns a copy of this `Variable` that is detached from its autograd graph.
# This method is OK to call if the `Variable` is a view.
#
# NOTE: Previously, changing the tensor metadata (e.g. sizes / strides /
# storage / storage_offset) of a tensor created from `detach()` would also
# update that metadata in the original tensor. The new behavior is that such
# metadata changes to the detached tensor no longer update the original
# tensor; `detach()` sets `allow_tensor_metadata_change_` to false to make
# these changes explicitly illegal, so users cannot change metadata of the
# detached tensor and expect the original tensor to be updated as well.
- func: detach(Tensor(a) self) -> Tensor(a)
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: detach
    NestedTensorCPU, NestedTensorCUDA: detach
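# Illustrative note (not part of the schema): detach() shares storage with the
# source but cuts the autograd edge, and metadata changes on the result are
# rejected per the NOTE above, e.g.
#
#   x = torch.ones(2, requires_grad=True)
#   d = x.detach()   # same storage, requires_grad == False
#   d.resize_(4)     # raises: metadata change on a detached tensor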
# Like `detach()`, but modifies this `Variable` in-place. This method may
# only be called on non-view `Variable`s. You can use `is_view()` to check
# this. If this `Variable` is a view, throws a `std::runtime_error`.
- func: detach_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: detach_
- func: size.int(Tensor self, int dim) -> int
  variants: function
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True
- func: size.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: slice
  tags: core
# NOTE: The implementation of split_with_sizes bypasses the dispatcher to call
# this; undo that if adding specific implementations here!
- func: slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: slice_backward
  autogen: slice_backward.out
- func: slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: slice_scatter
  autogen: slice_scatter.out
  tags: core
- func: select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: select_scatter_symint
  autogen: select_scatter.out
- func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: diagonal_scatter
  autogen: diagonal_scatter.out
- func: as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: as_strided_scatter_symint
  autogen: as_strided_scatter.out
- func: smm(Tensor self, Tensor mat2) -> Tensor
  variants: function, method
# softmax allows a positional dtype, unlike most operators, because making it
# keyword-only would be BC-breaking when loading jit models.
- func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  variants: function, method
- func: softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  dispatch:
    CompositeExplicitAutograd: softmax_out
- func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
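# Illustrative note (not part of the schema): both spellings below are
# accepted for softmax, while most operators would reject the positional form:
#
#   torch.softmax(x, 1, torch.float32)            # positional dtype
#   torch.softmax(x, dim=1, dtype=torch.float32)  # keyword dtype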
- func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  structured_delegate: _softmax.out
  dispatch:
    MkldnnCPU: mkldnn_softmax
    NestedTensorCPU, NestedTensorCUDA: softmax_nested
  tags: core
- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: softmax_cpu_out
    CUDA: softmax_cuda_out
    MPS: softmax_mps_out
- func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
  structured_delegate: _softmax_backward_data.out
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: nested_softmax_backward
- func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: softmax_backward_cpu_out
    CUDA: softmax_backward_cuda_out
    MPS: softmax_backward_mps_out
- func: unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: unsafe_split
  autogen: unsafe_split.Tensor_out
- func: split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: split
- func: split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: split_symint
- func: unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: unsafe_split_with_sizes
  autogen: unsafe_split_with_sizes.out
- func: split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: split_with_sizes
- func: hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
  variants: function, method
- func: hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
  variants: function, method
- func: vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
  variants: function, method
- func: vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
  variants: function, method
- func: dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
  variants: function, method
- func: dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
  variants: function, method
- func: squeeze(Tensor(a) self) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: squeeze
    QuantizedCPU, QuantizedCUDA: squeeze_quantized
    NestedTensorCPU, NestedTensorCUDA: squeeze_nested
- func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: squeeze
    QuantizedCPU, QuantizedCUDA: squeeze_quantized
    NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
  tags: core
- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: squeeze
    QuantizedCPU, QuantizedCUDA: squeeze_quantized
    NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
  tags: core
- func: squeeze_(Tensor(a!) self) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: squeeze_
- func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: squeeze_
- func: squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: squeeze_
- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: function, method
- func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: _sspaddmm_out_only_sparse
    CUDA: _sspaddmm_out_only_sparse_cuda
    SparseCPU: _sspaddmm_out_cpu
    SparseCUDA: _sspaddmm_out_cuda
- func: stack(Tensor[] tensors, int dim=0) -> Tensor
  dispatch:
    CompositeExplicitAutograd: stack
- func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: stack_out
- func: _stack(Tensor[] tensors, int dim=0) -> Tensor
  dispatch: # match the backends supported by _cat
    CPU: _stack_cpu
    CompositeExplicitAutograd: _stack
- func: _stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch: # match the backends supported by _cat_out
    CPU: _stack_out_cpu
    CompositeExplicitAutograd: _stack_out
- func: hstack(Tensor[] tensors) -> Tensor
- func: hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: vstack(Tensor[] tensors) -> Tensor
- func: vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: dstack(Tensor[] tensors) -> Tensor
- func: dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
# Overload without center & pad mode, needed for forward-compatibility
- func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
  variants: function, method
  cpp_no_default_args: ['hop_length', 'win_length', 'window', 'normalized']
- func: stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
  variants: function, method
- func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
  variants: function, method
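# Illustrative note (not part of the schema): torch.stft today routes to the
# .center overload; the overload without center/pad_mode is kept so that
# already-serialized jit models continue to resolve, e.g.
#
#   S = torch.stft(x, n_fft=256, return_complex=True)   # uses stft.center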
- func: stride.int(Tensor self, int dim) -> int
  variants: function
  device_check: NoCheck
  device_guard: False
  manual_cpp_binding: True
- func: stride.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
  device_check: NoCheck
  device_guard: False
- func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: sum
    SparseCPU, SparseCUDA: sum_coo
    SparseCsrCPU, SparseCsrCUDA: sum_csr
  autogen: sum.out
- func: sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  structured_delegate: sum.IntList_out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    NestedTensorCPU: NestedTensor_sum_dim_CPU
    SparseCPU, SparseCUDA: sum_sparse_coo
  tags: core
- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: sum_out
    MPS: sum_out_mps
- func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
# TODO: this function will be replaced once nested expand semantics have been settled on
- func: _nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
  dispatch:
    NestedTensorCPU: _nested_sum_backward_cpu
- func: nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: nansum
    MPS: nansum_mps
- func: nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: nansum_out
    MPS: nansum_out_mps
- func: sum_to_size(Tensor self, int[] size) -> Tensor
  variants: method
  device_check: NoCheck
  device_guard: False
- func: sqrt(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: sqrt.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sqrt_sparse
    SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr
  tags: [core, pointwise]
- func: sqrt_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: sqrt.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sqrt_sparse_
    SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_
  tags: pointwise
- func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sqrt_out
    MPS: sqrt_out_mps
    SparseCPU, SparseCUDA: sqrt_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_out
  tags: pointwise
- func: square(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: pointwise
- func: square_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: pointwise
- func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise
- func: std(Tensor self, bool unbiased=True) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: std
    MPS: std_mps
    QuantizedCPU: std_quantized_cpu
  4466. - func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
  4467. device_check: NoCheck # TensorIterator
  4468. variants: function
  4469. cpp_no_default_args: ["unbiased"]
  4470. - func: std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  4471. device_check: NoCheck # TensorIterator
  4472. variants: function
  4473. cpp_no_default_args: ["unbiased"]
  4474. - func: std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  4475. device_check: NoCheck # TensorIterator
  4476. variants: function
  4477. dispatch:
  4478. CPU, CUDA: std_mean
  4479. autogen: std_mean.correction_out
  4480. - func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  4481. device_check: NoCheck # TensorIterator
  4482. variants: function
  4483. cpp_no_default_args: ["unbiased"]
  4484. - func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  4485. device_check: NoCheck # TensorIterator
  4486. variants: function
  4487. - func: std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  4488. device_check: NoCheck # TensorIterator
  4489. cpp_no_default_args: ["unbiased"]
  4490. - func: std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  4491. device_check: NoCheck # TensorIterator
  4492. dispatch:
  4493. CPU, CUDA: std_out
  4494. QuantizedCPU: std_out_quantized_cpu
  4495. - func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
  4496. device_check: NoCheck # TensorIterator
  4497. variants: function, method
  4498. cpp_no_default_args: ["unbiased"]
  4499. - func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  4500. device_check: NoCheck # TensorIterator
  4501. cpp_no_default_args: ["unbiased"]
  4502. - func: std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
  4503. device_check: NoCheck # TensorIterator
  4504. variants: function, method
  4505. - func: std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  4506. device_check: NoCheck # TensorIterator
  4507. variants: function
  4508. - func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
  4509. device_check: NoCheck # TensorIterator
  4510. variants: function, method
  4511. dispatch:
  4512. CPU, CUDA: prod
  4513. MPS: prod_mps
  4514. autogen: prod.out
  4515. - func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  4516. structured_delegate: prod.int_out
  4517. device_check: NoCheck # TensorIterator
  4518. variants: function, method
  4519. - func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  4520. structured: True
  4521. device_check: NoCheck # TensorIterator
  4522. dispatch:
  4523. CPU, CUDA: prod_out
  4524. MPS: prod_out_mps
  4525. - func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  4526. device_check: NoCheck # TensorIterator
  4527. variants: function, method
  4528. - func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  4529. device_check: NoCheck # TensorIterator
  4530. - func: t(Tensor(a) self) -> Tensor(a)
  4531. device_check: NoCheck
  4532. device_guard: False
  4533. variants: function, method
  4534. dispatch:
  4535. CompositeExplicitAutograd: t
  4536. - func: t_(Tensor(a!) self) -> Tensor(a!)
  4537. device_check: NoCheck
  4538. device_guard: False
  4539. variants: method
  4540. tags: inplace_view
  4541. dispatch:
  4542. CompositeExplicitAutograd: t_
  4543. - func: tan(Tensor self) -> Tensor
  4544. device_check: NoCheck # TensorIterator
  4545. structured_delegate: tan.out
  4546. variants: function, method
  4547. dispatch:
  4548. SparseCPU, SparseCUDA: tan_sparse
  4549. SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr
  4550. tags: pointwise
  4551. - func: tan_(Tensor(a!) self) -> Tensor(a!)
  4552. device_check: NoCheck # TensorIterator
  4553. structured_delegate: tan.out
  4554. variants: function, method
  4555. dispatch:
  4556. SparseCPU, SparseCUDA: tan_sparse_
  4557. SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_
  4558. tags: pointwise
  4559. - func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  4560. device_check: NoCheck # TensorIterator
  4561. structured: True
  4562. structured_inherits: TensorIteratorBase
  4563. dispatch:
  4564. CPU, CUDA: tan_out
  4565. MPS: tan_out_mps
  4566. SparseCPU, SparseCUDA: tan_sparse_out
  4567. SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_out
  4568. tags: pointwise
  4569. - func: tanh(Tensor self) -> Tensor
  4570. device_check: NoCheck # TensorIterator
  4571. structured_delegate: tanh.out
  4572. variants: function, method
  4573. dispatch:
  4574. QuantizedCPU: tanh_quantized_cpu
  4575. MkldnnCPU: mkldnn_tanh
  4576. SparseCPU, SparseCUDA: tanh_sparse
  4577. SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr
  4578. NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh
  4579. tags: [core, pointwise]
  4580. - func: tanh_(Tensor(a!) self) -> Tensor(a!)
  4581. device_check: NoCheck # TensorIterator
  4582. structured_delegate: tanh.out
  4583. variants: function, method
  4584. dispatch:
  4585. MkldnnCPU: mkldnn_tanh_
  4586. SparseCPU, SparseCUDA: tanh_sparse_
  4587. SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_
  4588. NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh_
  4589. tags: pointwise
  4590. - func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  4591. device_check: NoCheck # TensorIterator
  4592. structured: True
  4593. structured_inherits: TensorIteratorBase
  4594. dispatch:
  4595. CPU, CUDA: tanh_out
  4596. MPS: tanh_out_mps
  4597. SparseCPU, SparseCUDA: tanh_sparse_out
  4598. SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_out
  4599. tags: pointwise
  4600. - func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
  4601. variants: function
  4602. - func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
  4603. variants: function
  4604. dispatch:
  4605. CPU, CUDA: tensordot_out
  4606. # TODO: namespace threshold in 'nn'
  4607. - func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
  4608. device_check: NoCheck # TensorIterator
  4609. variants: function
  4610. structured_delegate: threshold.out
  4611. dispatch:
  4612. QuantizedCPU: threshold_quantized_cpu
  4613. - func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
  4614. device_check: NoCheck # TensorIterator
  4615. variants: function
  4616. structured_delegate: threshold.out
  4617. - func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
  4618. device_check: NoCheck # TensorIterator
  4619. structured: True
  4620. structured_inherits: TensorIteratorBase
  4621. dispatch:
  4622. CPU, CUDA: threshold_out
  4623. MPS: threshold_out_mps
  4624. - func: threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
  4625. structured: True
  4626. structured_inherits: TensorIteratorBase
  4627. dispatch:
  4628. CPU, CUDA: threshold_backward_out
  4629. MPS: threshold_backward_out_mps
  4630. SparseCPU, SparseCUDA: threshold_backward_sparse_out
  4631. SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed_out
  4632. - func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
  4633. variants: function
  4634. structured_delegate: threshold_backward.grad_input
  4635. dispatch:
  4636. MkldnnCPU: mkldnn_relu_backward
  4637. SparseCPU, SparseCUDA: threshold_backward_sparse
  4638. SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed
  4639. NestedTensorCPU, NestedTensorCUDA: threshold_backwards_nested
  4640. tags: pointwise
  4641. - func: tile(Tensor self, int[] dims) -> Tensor
  4642. variants: function, method
  4643. - func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
  4644. variants: function, method
  4645. device_check: NoCheck
  4646. device_guard: False
  4647. dispatch:
  4648. CompositeExplicitAutograd: transpose
  4649. NestedTensorCPU, NestedTensorCUDA: transpose_nested
  4650. - func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
  4651. variants: function, method
  4652. device_check: NoCheck
  4653. device_guard: False
  4654. - func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
  4655. device_check: NoCheck
  4656. device_guard: False
  4657. dispatch:
  4658. MkldnnCPU: mkldnn_transpose
  4659. - func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  4660. variants: method
  4661. device_check: NoCheck
  4662. device_guard: False
  4663. tags: inplace_view
  4664. dispatch:
  4665. CompositeExplicitAutograd: transpose_
  4666. - func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  4667. device_check: NoCheck
  4668. device_guard: False
  4669. dispatch:
  4670. MkldnnCPU: mkldnn_transpose_
  4671. autogen: _mkldnn_transpose.out
  4672. - func: one_hot(Tensor self, int num_classes=-1) -> Tensor
  4673. python_module: nn
  4674. variants: function
  4675. tags: dynamic_output_shape
  4676. - func: flip(Tensor self, int[] dims) -> Tensor
  4677. variants: function, method
  4678. dispatch:
  4679. CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip
  4680. MPS: flip_mps
  4681. autogen: flip.out
  4682. tags: core
  4683. - func: fliplr(Tensor self) -> Tensor
  4684. variants: function, method
  4685. - func: flipud(Tensor self) -> Tensor
  4686. variants: function, method
  4687. - func: roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
  4688. variants: function, method
  4689. dispatch:
  4690. CPU: roll_cpu
  4691. CUDA: roll_cuda
  4692. autogen: roll.out
# The default int[] value [0,1] must not contain a space after the comma, since the codegen parser uses ', ' to split args
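# To illustrate the constraint (the second form is hypothetical and not in
# this file; it is shown only for contrast):
#
#   int[] dims=[0,1]    # parsed as one argument with default [0,1]
#   int[] dims=[0, 1]   # ', ' would split this into two bogus arguments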
- func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: rot90
  autogen: rot90.out
- func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
- func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
# Fused implementation detail for transformers. Adds in-projection bias to QKV and divides Q by sqrt(D/num_heads).
- func: _transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
  dispatch:
    CPU, NestedTensorCPU: transform_bias_rescale_qkv_cpu
    CUDA, NestedTensorCUDA: transform_bias_rescale_qkv_cuda
  autogen: _transform_bias_rescale_qkv.out
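# A dense sketch of the fused step above (illustrative only; assumes qkv has
# shape [B, T, 3*D] and qkv_bias has shape [3*D], ignoring the per-head
# reshuffling the fused kernel also performs):
#
#   q, k, v = (qkv + qkv_bias).chunk(3, dim=-1)
#   q = q / math.sqrt(q.size(-1) / num_heads)   # Q / sqrt(D/num_heads)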
- func: _nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
  dispatch:
    CPU, CUDA: NestedTensor_nested_tensor_from_mask
  autogen: _nested_tensor_from_mask.out
- func: _nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
  dispatch:
    CPU, CUDA: NestedTensor_nested_tensor_from_mask_left_aligned
- func: _nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
  device_check: NoCheck # cpu_nested_shape_example will always be on CPU
  dispatch:
    CPU: nested_from_padded_generic
    CUDA: nested_from_padded_cuda
  autogen: _nested_from_padded.out
# These private functions are temporary. They will be updated/deleted when nested tensors switch to using SymInts for their metadata representation
- func: _nested_tensor_size(Tensor self) -> Tensor
  variants: method
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_tensor_size
  autogen: _nested_tensor_size.out
- func: _nested_tensor_strides(Tensor self) -> Tensor
  variants: method
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_tensor_strides
  autogen: _nested_tensor_strides.out
- func: _nested_tensor_offsets(Tensor self) -> int[]
  variants: method
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: _nested_tensor_offsets
# _nested_from_padded is not usable from Python, so
# _nested_from_padded_and_nested_example is available for testing.
- func: _nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
  dispatch:
    NestedTensorCPU, NestedTensorCUDA: NestedTensor_from_padded_and_nested_example
  autogen: _nested_from_padded_and_nested_example.out
# The input argument types to this function are temporary. When nested tensors switch to using SymInts for their metadata representation,
# this will need to be updated
- func: _nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
  variants: function
  device_check: NoCheck
  dispatch:
    CPU, CUDA: _nested_view_from_buffer
- func: _nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
  variants: function
  device_check: NoCheck
  tags: view_copy
  dispatch:
    CompositeExplicitAutogradNonFunctional: _nested_view_from_buffer_copy
  autogen: _nested_view_from_buffer_copy.out
- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
  dispatch:
    # calls unsqueeze
    CompositeExplicitAutogradNonFunctional: _trilinear
  autogen: _trilinear.out
- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
- func: trunc(Tensor self) -> Tensor
  structured_delegate: trunc.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: trunc_sparse
    SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr
  tags: pointwise
- func: trunc_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: trunc.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: trunc_sparse_
    SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_
  tags: pointwise
- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: trunc_out
    MPS: trunc_out_mps
    SparseCPU, SparseCUDA: trunc_sparse_out
    SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_out
  tags: pointwise
# Alias for trunc
- func: fix(Tensor self) -> Tensor
  variants: function, method
- func: fix_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
- func: fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: type_as(Tensor self, Tensor other) -> Tensor
  variants: method
- func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
  variants: function
- func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: _unique_cpu
    CUDA: _unique_cuda
  autogen: _unique.out
- func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: unique_dim_cpu
    CUDA: unique_dim_cuda
  tags: dynamic_output_shape
  autogen: unique_dim.out
- func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: unique_consecutive_cpu
    CUDA: unique_consecutive_cuda
    MPS: unique_consecutive_mps
  tags: dynamic_output_shape
  autogen: unique_consecutive.out
- func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: unique_dim_consecutive_cpu
    CUDA: unique_dim_consecutive_cuda
    MPS: unique_dim_consecutive_mps
  tags: dynamic_output_shape
  autogen: unique_dim_consecutive.out
# _unique and _unique_dim are fragile; modifying them can easily cause internal breakage.
# The operator below is a temporary hack for adding return_counts support.
# Please don't rely on these operators; they will be removed soon.
- func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU: _unique2_cpu
    CUDA: _unique2_cuda
    MPS: _unique2_mps
  tags: dynamic_output_shape
  autogen: _unique2.out
- func: _unsafe_view(Tensor self, SymInt[] size) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _unsafe_view
  autogen: _unsafe_view.out
- func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: unsqueeze
    SparseCPU, SparseCUDA: unsqueeze_sparse
    QuantizedCPU, QuantizedCUDA: unsqueeze_quantized
    NestedTensorCPU, NestedTensorCUDA: unsqueeze_nested
  tags: core
- func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view
  dispatch:
    CompositeExplicitAutograd: unsqueeze_
- func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
- func: var(Tensor self, bool unbiased=True) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  tags: core
  cpp_no_default_args: ["unbiased"]
- func: var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: var
    MPS: var_mps
- func: var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ["unbiased"]
- func: var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: var_out
- func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  cpp_no_default_args: ["unbiased"]
- func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  cpp_no_default_args: ["unbiased"]
- func: var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA: var_mean
  autogen: var_mean.correction_out
- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
  cpp_no_default_args: ["unbiased"]
- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
  device_check: NoCheck # TensorIterator
  variants: function
- func: view_as(Tensor(a) self, Tensor other) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
- func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CPU, CUDA: where
    MPS: where_mps
  tags: [core, pointwise]
- func: where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: where_self_out
    MPS: where_self_out_mps
- func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
  variants: function
- func: where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
  variants: function, method
- func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
  variants: function
- func: where(Tensor condition) -> Tensor[]
  device_check: NoCheck # TensorIterator
  variants: function
- func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
  variants: function
# VariableType::_weight_norm does not want to be given a gap in the autograd graph,
# so we don't define "dispatch" variants for it.
- func: _weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
  variants: function
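# What _weight_norm computes, as a dense sketch (illustrative only; assumes the
# usual decomposition w = g * v / ||v||, with the norm taken over every dim
# except `dim` via norm_except_dim above):
#
#   v, g = torch.randn(8, 4), torch.randn(8, 1)
#   w = torch._weight_norm(v, g, 0)
#   w_ref = g * v / torch.norm_except_dim(v, 2, 0)   # expected to match w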
- func: _weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: weight_norm_cpu
    CUDA: weight_norm_cuda
  autogen: _weight_norm_interface.out
- func: _weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: weight_norm_backward_cpu
    CUDA: weight_norm_backward_cuda
  autogen: _weight_norm_interface_backward.out
- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
  variants: function
- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: zeros
  autogen: zeros.names_out
- func: _efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: _efficientzerotensor
    CUDA: _efficientzerotensor_cuda
    Meta: _efficientzerotensor_meta
  autogen: _efficientzerotensor.out
- func: zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: zeros_symint
- func: zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: zeros_out
    SparseCPU, SparseCUDA, SparseMeta: zeros_sparse_out
- func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
  dispatch:
    # NB: Although this composite mutates on the inside, it is
    # non-differentiable so NonFunctional doesn't apply
    CompositeExplicitAutograd: zeros_like
  autogen: zeros_like.out
- func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor
  variants: function
  dispatch:
    CPU: _standard_gamma_grad_cpu
    CUDA: _standard_gamma_grad_cuda
  autogen: _standard_gamma_grad.out
- func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor
  variants: function
  dispatch:
    CPU: _s_gamma_cpu
    CUDA: _s_gamma_cuda
  tags: nondeterministic_seeded
  autogen: _standard_gamma.out
- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
  dispatch:
    CPU: _dirichlet_grad_cpu
    CUDA: _dirichlet_grad_cuda
  autogen: _dirichlet_grad.out
- func: _sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
  tags: nondeterministic_seeded
  variants: function
  dispatch:
    CPU: _s_dirichlet_cpu
    CUDA: _s_dirichlet_cuda
  autogen: _sample_dirichlet.out
- func: poisson(Tensor self, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU: _s_poisson_cpu
    CUDA: _s_poisson_cuda
  tags: nondeterministic_seeded
  autogen: poisson.out
- func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU: _s_binomial_cpu
    CUDA: _s_binomial_cuda
  tags: nondeterministic_seeded
  autogen: binomial.out
# When more variants get ported to native, this dispatch will get more
# complicated
- func: native_norm(Tensor self, Scalar p=2) -> Tensor
  dispatch:
    SparseCPU, SparseCUDA: norm_sparse
  autogen: native_norm.out
- func: native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
  dispatch:
    SparseCPU, SparseCUDA: norm_sparse
  autogen: native_norm.ScalarOpt_dim_dtype_out
# TODO: reduce signatures down to one when optional args is available
- func: _sparse_sum(Tensor self) -> Tensor
- func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
- func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
  dispatch:
    CompositeExplicitAutograd: _sparse_sum
  autogen: _sparse_sum.dim_out
- func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
- func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
  dispatch:
    SparseCPU: _sparse_sum_backward_cpu
    SparseCUDA: _sparse_sum_backward_cuda
  autogen: _sparse_sum_backward.out
- func: _sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  dispatch:
    SparseCsrCPU: _sparse_csr_sum_cpu
    SparseCsrCUDA: _sparse_csr_sum_cuda
  autogen: _sparse_csr_sum.dim_dtype_out
- func: _sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  dispatch:
    SparseCsrCPU: _sparse_csr_prod_cpu
    SparseCsrCUDA: _sparse_csr_prod_cuda
  autogen: _sparse_csr_prod.dim_dtype_out
- func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  python_module: sparse
  variants: function
- func: _sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  python_module: sparse
  variants: function
- func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  python_module: sparse
  dispatch:
    SparseCPU: softmax_sparse_cpu
    SparseCUDA: softmax_sparse_cuda
  autogen: _sparse_softmax.out
- func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
  dispatch:
    SparseCPU: softmax_backward_sparse_cpu
    SparseCUDA: softmax_backward_sparse_cuda
  autogen: _sparse_softmax_backward_data.out
- func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  python_module: sparse
  variants: function
- func: _sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  python_module: sparse
  variants: function
- func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  python_module: sparse
  dispatch:
    SparseCPU: log_softmax_sparse_cpu
    SparseCUDA: log_softmax_sparse_cuda
  autogen: _sparse_log_softmax.out
- func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
  dispatch:
    SparseCPU: log_softmax_backward_sparse_cpu
    SparseCUDA: log_softmax_backward_sparse_cuda
  autogen: _sparse_log_softmax_backward_data.out
- func: _spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
  python_module: sparse
  dispatch:
    CPU: spdiags
  autogen: _spdiags.out
- func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: norm
  autogen: norm.ScalarOpt_dtype_out
- func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: norm
  autogen: norm.Scalar_out
- func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
  structured_delegate: norm.dtype_out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sparse_dtype_norm
- func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
  structured_delegate: norm.out
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sparse_norm
- func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: norm_dtype_out
    MPS: norm_dtype_out_mps
- func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: norm_out
    MPS: norm_out_mps
# These four redispatch in their implementation, so OK to be CompositeImplicitAutograd
- func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
- func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: frexp
  tags: pointwise
- func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
  dispatch:
    CPU, CUDA: frexp_out
  tags: pointwise
# Deprecated (v.1.12)
- func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  variants: function
# Deprecated (v.1.12)
- func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
# Deprecated (v.1.12)
- func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
  variants: function
# Deprecated (v.1.12)
- func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
# Deprecated (v.1.12)
- func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
  variants: function
# Deprecated (v.1.12)
- func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
- func: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: clone
    SparseCPU, SparseCUDA: clone_sparse
    SparseCsrCPU, SparseCsrCUDA: clone_sparse_compressed
    MkldnnCPU: mkldnn_clone
    QuantizedCPU, QuantizedCUDA: quantized_clone
    NestedTensorCPU, NestedTensorCUDA: clone_nested
  autogen: clone.out
  tags: core
- func: positive(Tensor(a) self) -> Tensor(a)
  variants: function, method
  tags: pointwise
- func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: resize_as_
  autogen: resize_as, resize_as.out
- func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: resize_as_sparse_
    SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_compressed_
  autogen: resize_as_sparse, resize_as_sparse.out
- func: zero_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: zero_
    MPS: zero_mps_
    Meta: zero_meta_
    SparseCPU, SparseCUDA, SparseMeta: zero_sparse_
    SparseCsrCPU, SparseCsrCUDA: zero_sparse_csr_
    MkldnnCPU: mkldnn_zero_
  autogen: zero, zero.out
- func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sub_out
    MPS: sub_out_mps
    SparseCPU, SparseCUDA: sub_out_sparse
  tags: pointwise
- func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: sub.out
  dispatch:
    SparseCPU, SparseCUDA: sub_sparse
    ZeroTensor: sub_zerotensor
  tags: [core, pointwise]
- func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: sub.out
  dispatch:
    SparseCPU, SparseCUDA: sub_sparse_
  tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: sub
  tags: [core, pointwise]
- func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: sub_
  autogen: sub.Scalar_out
  tags: pointwise
# subtract, alias for sub
- func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  variants: function, method
- func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
  variants: method
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  variants: function, method
- func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
  variants: method
- func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA: rsub
  autogen: rsub.Tensor_out
- func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck # TensorIterator
  dispatch:
    CPU, CUDA: heaviside_out
  tags: pointwise
- func: heaviside(Tensor self, Tensor values) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: heaviside.out
  tags: pointwise
- func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: heaviside.out
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: rsub
  autogen: rsub.Scalar_out
  tags: pointwise
# Functionally the same as addmm, but we give it a different derivative formula
# that doesn't propagate gradients to non-present entries on sparse.
- func: _sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  python_module: sparse
  dispatch:
    CompositeExplicitAutograd: _sparse_addmm
  autogen: _sparse_addmm.out
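# A sketch of that derivative contract (illustrative only; the op surfaces in
# Python as torch.sparse.addmm):
#
#   sp = torch.eye(3).to_sparse().requires_grad_()
#   out = torch.sparse.addmm(torch.zeros(3, 2), sp, torch.randn(3, 2))
#   out.sum().backward()   # sp.grad is nonzero only at sp's specified entries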
- func: sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  python_module: sparse
  dispatch:
    SparseCsrCUDA: sparse_sampled_addmm_out_sparse_csr_cuda
    SparseCsrCPU: sparse_sampled_addmm_out_sparse_csr_cpu
- func: sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  python_module: sparse
  dispatch:
    SparseCsrCUDA: sparse_sampled_addmm_sparse_csr_cuda
    SparseCsrCPU: sparse_sampled_addmm_sparse_csr_cpu
- func: _sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
  python_module: sparse
  dispatch:
    SparseCsrCPU: _sparse_mm_reduce_impl_sparse_csr_cpu
- func: _sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
  python_module: sparse
  dispatch:
    SparseCsrCPU: _sparse_mm_reduce_impl_backward_sparse_csr_cpu
- func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: addmm_out_cpu
    CUDA: addmm_out_cuda
    MPS: addmm_out_mps
    SparseCPU: addmm_out_sparse_dense_cpu
    SparseCUDA: addmm_out_sparse_dense_cuda
    SparseCsrCPU: addmm_out_sparse_compressed_cpu
    SparseCsrCUDA: addmm_out_sparse_compressed_cuda
- func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  structured_delegate: addmm.out
  variants: function, method
  dispatch:
    SparseCPU: addmm_sparse_dense_cpu
    SparseCUDA: addmm_sparse_dense_cuda
    SparseCsrCPU, SparseCsrCUDA: addmm_sparse_compressed_dense
  tags: core
- func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  structured_delegate: addmm.out
  variants: method
  dispatch:
    # Warning! For whatever reason, the inplace sparse addmm is NON
    # broadcasting
    SparseCPU: s_addmm_sparse_dense_cpu_
    SparseCUDA: s_addmm_sparse_dense_cuda_
- func: _addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: addmm_activation_out_cpu
    CUDA: addmm_activation_out_cuda
- func: _addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
  structured_delegate: _addmm_activation.out
  variants: function, method
# NOTE [ Sparse: autograd and API ]
#
#
# Sparse Tensor Constructors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The API entry points to sparse tensor construction should be
# `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`. Depending on whether the
# indices and values tensors are given, they eventually dispatch to either
# `sparse_coo_tensor_with_dims` or `sparse_coo_tensor_with_dims_and_tensors`.
#
# The autograd support for the ctor is implemented on `sparse_coo_tensor_with_dims_and_tensors`.
#
# The API methods `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`
# **must not** have specific type dispatches because otherwise codegen will
# consider them as abstract methods (see Note [Abstract ATen methods]), dispatch
# using **Tensor** type, and thus lose autograd tracking on the actual method
# they dispatch to, e.g., `sparse_coo_tensor_with_dims_and_tensors`.
#
#
# Sparse Methods API Design
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Goals: 1. Flexible API for users to write custom sparse ops
#        2. ctor and member accessor with autograd support
#
# To achieve 1, we need to provide a set of *dangerous* APIs (dangerous in the
# sense that misusing them will break sparse tensor invariants and may result
# in unexpected behavior, e.g., crash). These methods are all prefixed with
# underscore "_" to indicate that they should be used with care. We provide:
#
# + `_indices()`: returns the *raw* indices within the sparse tensor (not just
#                 sharing storage). Any inplace operation will change the
#                 actual indices, including t_, set_, as_strided_, resize_,
#                 etc.
# + `_values()`: returns the *raw* values within the sparse tensor. Similar
#                semantics as `_indices()`
# + `_nnz()`: returns the number of non-zero entries. This will always be
#             determined by the shapes of indices and values.
# + `_coalesced_(bool)`: inplace sets whether the tensor is coalesced, and
#                        returns itself.
#
# These methods are very useful in writing new operations, e.g., a custom
# autograd Function.
#
# We also provide other public *safe* APIs:
# + `indices()`: returns a **view** of the indices tensor if the sparse tensor
#                is **coalesced**.
# + `values()`: returns a **view** of the values tensor if the containing
#               sparse tensor is **coalesced**.
# + `sparse_dim()`: number of sparse dimensions
# + `dense_dim()`: number of dense dimensions
# + `is_coalesced()`: whether the sparse tensor is coalesced
#
# `_indices()` and `_values()` should return the raw indices and values dense
# tensors within a sparse tensor. They can be quite unsafe with inplace
# operations like `t_()`, and expose uncoalesced indices and values. The public
# recommended API is `indices()` and `values()`, both of which first check that
# the tensor is coalesced and return views on those tensors.
#
#
# Autograd Support
# ~~~~~~~~~~~~~~~~
#
# Autograd is supported on `values()` and the sparse tensor ctor with indices
# and values tensors. E.g., `torch.sparse_coo_tensor(i, v).values().sum()` is
# differentiable w.r.t. `v`.
#
# NB: The `values()` and `_values()` operators are special in that they are
# layout-aware, i.e., the output depends not just on the data it represents, but
# also on the input layout details (in this case, the `indices` tensor). See
# NOTE [ as_strided Backward and layout-aware/agnostic autograd ] in Functions.cpp
# for discussion on layout-aware vs layout-agnostic autograd. Since PyTorch ops
# operate in the layout-agnostic mode, similar to `as_strided`, backward of
# these two operators needs to consider them in a layout-agnostic way:
# + `values()`:
#     Input is coalesced.
#     We just pretend having `input.indices()` as an additional argument
#     `input_indices`, then forward is similar to
#     `input.to(kStrided).index_select(input_indices)` regardless of the layout.
#     Note that `values()` normally is layout-aware even if we constrain
#     ourselves on sparse inputs since it may include all-zero value entries
#     as "present" entries.
# + `_values()`:
#     Input may be uncoalesced.
#     It is not straightforward to construct a layout-agnostic version because
#     duplicate indices entries may exist and additional parameterization is
#     needed to distribute the value into different values entries. Furthermore,
#     this op is intended to provide ways to write custom sparse ops, rather
#     than being used in the autograd graph, so it is marked as
#     *non-differentiable* in derivatives.yaml.
#
# Before reading the following, see NOTE [ Autograd Variable Views ] in
# variable.h for details on views that are tracked by autograd, and views that
# are not.
#
# Moreover, these methods return tensors that share storage with inputs, so we
# mark these methods as view ops to support autograd history tracking.
# The sparse tensor ctor output should technically be a view of both the input
# indices and values tensors, but currently we only support setting as view of
# a single Variable, so it is only a view of the values tensor.
# TODO: clone indices in sparse tensor ctor.
#
# For other methods that return outputs that share storage with inputs, i.e.,
# `indices()` and `_indices()`, we mark their outputs as non-differentiable, so
# the view relation is not tracked by autograd, but the version counter is still
# shared. In other words, their outputs are non-differentiable views of the
# sparse tensor.
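# A minimal sketch of the safe vs. raw accessors and ctor autograd described
# above (illustrative only):
#
#   i = torch.tensor([[0, 1], [1, 0]])
#   v = torch.tensor([3., 4.], requires_grad=True)
#   s = torch.sparse_coo_tensor(i, v, (2, 2))
#   s.coalesce().values().sum().backward()  # differentiable w.r.t. v
#   raw = s._values()                       # raw, possibly uncoalesced values;
#                                           # non-differentiable by design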
# FIXME: it would be nicer if TensorOptions was optional-based; we are not adding default arguments for the options,
# given that the defaults would never make sense.
- func: sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
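# For orientation, constructing a 2x2 CSR tensor through the schemas above
# (illustrative values):
#
#   crow = torch.tensor([0, 1, 2])   # compressed row pointers
#   col  = torch.tensor([1, 0])      # column indices
#   val  = torch.tensor([5., 6.])
#   csr  = torch.sparse_csr_tensor(crow, col, val, (2, 2))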
  5468. - func: _sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5469. - func: _sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5470. - func: _sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5471. - func: _sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5472. - func: _sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5473. - func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
  5474. dispatch:
  5475. CompositeExplicitAutograd: sparse_coo_tensor
  5476. autogen: sparse_coo_tensor.size_out
  5477. - func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5478. - func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5479. - func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  5480. dispatch:
  5481. CompositeImplicitAutograd: _sparse_coo_tensor_unsafe_symint
  5482. - func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()
  5483. - func: _validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
  5484. - func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
  5485. - func: _validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
  5486. - func: _validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
  5487. - func: _validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
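# Illustrative usage sketch (not part of the generated schema; uses the private
# torch._validate_sparse_coo_tensor_args binding): the `_unsafe` constructor
# variants above skip the invariant checks that these `_validate_*` functions
# perform, so callers building indices by hand can validate explicitly.
#
#   import torch
#   indices = torch.tensor([[0, 1], [1, 0]])
#   values = torch.tensor([1.0, 2.0])
#   torch._validate_sparse_coo_tensor_args(indices, values, (2, 2))  # raises on bad input
#   s = torch.sparse_coo_tensor(indices, values, (2, 2))             # checked construction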
- func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse
  autogen: _sparse_coo_tensor_with_dims.out
- func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint
  autogen: _sparse_coo_tensor_with_dims_and_tensors.out
- func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: sparse_resize_
  autogen: sparse_resize, sparse_resize.out
- func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: sparse_resize_and_clear_
  autogen: sparse_resize_and_clear, sparse_resize_and_clear.out
- func: sparse_mask(Tensor self, Tensor mask) -> Tensor
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: sparse_mask
    SparseCsrCPU, SparseCsrCUDA: sparse_mask_sparse_csr
  autogen: sparse_mask.out
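# Illustrative usage sketch (not part of the generated schema): sparse_mask
# projects a strided tensor onto the sparsity pattern of a sparse mask.
#
#   import torch
#   dense = torch.arange(4.0).reshape(2, 2)
#   mask = torch.eye(2).to_sparse()       # sparse COO, nonzeros on the diagonal
#   out = dense.sparse_mask(mask)         # sparse tensor keeping dense[0, 0] and dense[1, 1]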
- func: _to_cpu(Tensor[] tensors) -> Tensor[]
  variants: function
- func: to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
  variants: method
# Special case of to_dense with custom derivative
- func: _to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: sparse_to_dense
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_dense
    MkldnnCPU: mkldnn_to_dense
  autogen: _to_dense.out
- func: to_dense_backward(Tensor grad, Tensor input) -> Tensor
- func: sparse_dim(Tensor self) -> int
  variants: method
  dispatch:
    CPU, CUDA: sparse_dim_strided
    SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse
    SparseCsrCPU, SparseCsrCUDA: sparse_dim_sparse_csr
  device_check: NoCheck
  device_guard: False
# legacy method
- func: _dimI(Tensor self) -> int
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: sparse_dim_sparse
  device_check: NoCheck
  device_guard: False
- func: dense_dim(Tensor self) -> int
  variants: method
  dispatch:
    CPU, CUDA: dense_dim_strided
    SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
    SparseCsrCPU, SparseCsrCUDA: dense_dim_sparse_csr
  device_check: NoCheck
  device_guard: False
# legacy method
- func: _dimV(Tensor self) -> int
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
  device_check: NoCheck
  device_guard: False
- func: _nnz(Tensor self) -> int
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: _nnz_sparse
    SparseCsrCPU, SparseCsrCUDA: _nnz_sparse_csr
  device_check: NoCheck
  device_guard: False
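# Illustrative usage sketch (not part of the generated schema): sparse_dim and
# dense_dim split a hybrid sparse tensor's dimensions; _dimI/_dimV are their
# legacy names, and _nnz counts stored entries.
#
#   import torch
#   i = torch.tensor([[0, 1]])                # one sparse dim, two entries
#   v = torch.ones(2, 3)                      # one dense dim of size 3
#   s = torch.sparse_coo_tensor(i, v, (4, 3))
#   s.sparse_dim(), s.dense_dim(), s._nnz()   # -> (1, 1, 2)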
# NOTE: [ coalesce autograd ]
# coalesce returns self directly for already coalesced sparse tensors.
# This means coalesce cannot have a derivative registered, otherwise it creates
# circular references in the autograd graph (see gh-52874).
# Instead, the derivative is registered on the slow-path "_coalesce"
- func: coalesce(Tensor(a) self) -> Tensor(a)
  variants: method
- func: _coalesce(Tensor self) -> Tensor
  dispatch:
    SparseCPU: _coalesce_sparse_cpu
    SparseCUDA: _coalesce_sparse_cuda
  autogen: _coalesce.out
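# Illustrative usage sketch (not part of the generated schema): coalesce() sums
# duplicate indices; for an already-coalesced tensor it returns self, which is
# why the derivative lives on _coalesce per the NOTE above.
#
#   import torch
#   i = torch.tensor([[0, 0], [1, 1]])        # duplicate entry at (0, 1)
#   v = torch.tensor([1.0, 2.0])
#   s = torch.sparse_coo_tensor(i, v, (2, 2))
#   s.is_coalesced()                          # False
#   c = s.coalesce()                          # single entry (0, 1) with value 3.0
#   c.is_coalesced()                          # True; coalescing again is a no-op fast path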
- func: is_coalesced(Tensor self) -> bool
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse
    CompositeExplicitAutograd: is_coalesced_default
  device_check: NoCheck
  device_guard: False
- func: _indices(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: _indices_sparse
  device_check: NoCheck
  device_guard: False
- func: _values(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: _values_sparse
  device_check: NoCheck
  device_guard: False
# This method doesn't do any checks but only directly sets the flag, so it can be
# a bit unsafe. Similar to _indices and _values, this is useful for implementing
# custom sparse operations in Python/C++ extensions.
- func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: _coalesced_sparse_
  device_check: NoCheck
  device_guard: False
  autogen: _coalesced, _coalesced.out
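# Illustrative sketch (not part of the generated schema; uses the private
# _indices/_values/_coalesced_ methods mentioned in the comment above): a custom
# sparse op can rebuild a result from raw members and assert the coalesced flag
# when the operation is known to preserve it.
#
#   import torch
#   def scale_values(s: torch.Tensor, k: float) -> torch.Tensor:
#       # _indices/_values skip the is-coalesced check that indices()/values() do
#       out = torch.sparse_coo_tensor(s._indices(), s._values() * k, s.shape)
#       return out._coalesced_(s.is_coalesced())  # unsafe: caller asserts the flag
#   t = scale_values(torch.eye(2).to_sparse().coalesce(), 2.0)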
- func: indices(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: indices_sparse
    CompositeExplicitAutograd: indices_default
  device_check: NoCheck
  device_guard: False
- func: values(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCPU, SparseCUDA, SparseMeta: values_sparse
    SparseCsrCPU, SparseCsrCUDA: values_sparse_csr
    NestedTensorCPU, NestedTensorCUDA: values_nested
    CompositeExplicitAutograd: values_default
  device_check: NoCheck
  device_guard: False
- func: crow_indices(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: crow_indices_sparse_csr
    CompositeExplicitAutograd: crow_indices_default
  device_check: NoCheck
  device_guard: False
- func: col_indices(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: col_indices_sparse_csr
    CompositeExplicitAutograd: col_indices_default
  device_check: NoCheck
  device_guard: False
- func: ccol_indices(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: ccol_indices_sparse_csr
    CompositeExplicitAutograd: ccol_indices_default
  device_check: NoCheck
  device_guard: False
- func: row_indices(Tensor(a) self) -> Tensor(a)
  variants: method
  dispatch:
    SparseCsrCPU, SparseCsrCUDA: row_indices_sparse_csr
    CompositeExplicitAutograd: row_indices_default
  device_check: NoCheck
  device_guard: False
- func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    SparseCPU: hspmm_out_sparse_cpu
    SparseCUDA: hspmm_out_sparse_cuda
- func: hspmm(Tensor mat1, Tensor mat2) -> Tensor
  dispatch:
    SparseCPU: hspmm_sparse_cpu
    SparseCUDA: hspmm_sparse_cuda
- func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
  device_check: NoCheck # Allows copy into different device
  variants: function
  dispatch:
    SparseCPU, SparseCUDA: copy_sparse_
  autogen: copy_sparse_to_sparse, copy_sparse_to_sparse.out
# Adding the AutogradNestedTensor dispatch key makes this function
# CompositeImplicit-like for nested tensors
- func: unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: unbind
    CompositeImplicitAutogradNestedTensor: NestedTensor_unbind
- func: unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
  variants: function, method
- func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
  variants: method
  dispatch:
    CPU, CUDA: dense_to_sparse
    SparseCPU, SparseCUDA: sparse_coo_to_sparse
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
  autogen: to_sparse.sparse_dim_out
- func: to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
  variants: method
  dispatch:
    CPU, CUDA: dense_to_sparse
    SparseCPU, SparseCUDA: sparse_coo_to_sparse
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
  autogen: to_sparse.out
- func: to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
  variants: method
  dispatch:
    CPU, CUDA: dense_to_sparse_csr
    SparseCPU, SparseCUDA: coo_to_sparse_csr
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csr
  autogen: to_sparse_csr.out
- func: to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
  variants: method
  dispatch:
    CPU, CUDA: dense_to_sparse_csc
    SparseCPU, SparseCUDA: coo_to_sparse_csc
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csc
  autogen: to_sparse_csc.out
- func: to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
  variants: method
  dispatch:
    CPU, CUDA: dense_to_sparse_bsr
    SparseCPU, SparseCUDA: coo_to_sparse_bsr
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsr
  autogen: to_sparse_bsr.out
- func: to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
  variants: method
  dispatch:
    CPU, CUDA: dense_to_sparse_bsc
    SparseCPU, SparseCUDA: coo_to_sparse_bsc
    SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsc
  autogen: to_sparse_bsc.out
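# Illustrative usage sketch (not part of the generated schema): the layout
# conversions above route through different kernels depending on the input
# layout, e.g. dense_to_sparse_csr vs coo_to_sparse_csr.
#
#   import torch
#   d = torch.tensor([[1.0, 0.0], [0.0, 2.0]])
#   coo = d.to_sparse()                    # strided -> sparse COO
#   csr = d.to_sparse_csr()                # strided -> sparse CSR
#   csr2 = coo.to_sparse_csr()             # COO -> CSR (coo_to_sparse_csr path)
#   csr.crow_indices(), csr.col_indices()  # compressed-row members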
- func: to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
  variants: method
  dispatch:
    CPU: dense_to_mkldnn
  autogen: to_mkldnn.out
- func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
  variants: function
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_reorder_conv2d_weight
  autogen: mkldnn_reorder_conv2d_weight.out
- func: mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
  variants: function
  python_module: nn
  dispatch:
    MkldnnCPU: mkldnn_reorder_conv3d_weight
  autogen: mkldnn_reorder_conv3d_weight.out
- func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
- func: quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: quantize_per_tensor_dynamic
  autogen: quantize_per_tensor_dynamic.out
- func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: quantize_per_tensor
  autogen: quantize_per_tensor.out
- func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: quantize_per_tensor_tensor_qparams
  autogen: quantize_per_tensor.tensor_qparams_out
- func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
  variants: function
  dispatch:
    CPU: quantize_per_tensor_list_cpu
  autogen: quantize_per_tensor.tensors_out
- func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: quantize_per_channel
  autogen: quantize_per_channel.out
- func: dequantize.self(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    CPU, CUDA: dequantize_cpu_or_cuda
    QuantizedCPU, QuantizedCUDA: dequantize_quantized
  autogen: dequantize.self_out
- func: dequantize.tensors(Tensor[] tensors) -> Tensor[]
  variants: function
  dispatch:
    QuantizedCPU: dequantize_tensors_quantized_cpu
  autogen: dequantize.tensors_out
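# Illustrative usage sketch (not part of the generated schema): quantize_per_tensor
# and dequantize round-trip a float tensor through an affine-quantized one.
#
#   import torch
#   x = torch.tensor([0.0, 0.5, 1.0])
#   q = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
#   q.q_scale(), q.q_zero_point()          # -> (0.1, 0)
#   x2 = q.dequantize()                    # back to float32, up to rounding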
- func: q_scale(Tensor self) -> float
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: q_scale_quant
- func: q_zero_point(Tensor self) -> int
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: q_zero_point_quant
- func: q_per_channel_scales(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: q_per_channel_scales
  autogen: q_per_channel_scales.out
- func: q_per_channel_zero_points(Tensor self) -> Tensor
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: q_per_channel_zero_points
  autogen: q_per_channel_zero_points.out
- func: q_per_channel_axis(Tensor self) -> int
  variants: function, method
  dispatch:
    QuantizedCPU, QuantizedCUDA: q_per_channel_axis
- func: int_repr(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    QuantizedCPU: int_repr_quantized_cpu
    QuantizedCUDA: int_repr_quantized_cuda
  autogen: int_repr.out
- func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
  dispatch:
    CPU: make_per_tensor_quantized_tensor_cpu
    CUDA: make_per_tensor_quantized_tensor_cuda
  autogen: _make_per_tensor_quantized_tensor.out
- func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
  dispatch:
    CPU: make_per_channel_quantized_tensor_cpu
    CUDA: make_per_channel_quantized_tensor_cuda
  autogen: _make_per_channel_quantized_tensor.out
- func: qscheme(Tensor self) -> QScheme
  variants: method
  dispatch:
    QuantizedCPU, QuantizedCUDA: qscheme_quant
- func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
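# Illustrative usage sketch (not part of the generated schema): fake quantization
# keeps the float dtype but snaps values to the quantization grid, which is what
# quantization-aware training relies on.
#
#   import torch
#   x = torch.tensor([0.03, 0.37, 1.07])
#   y = torch.fake_quantize_per_tensor_affine(x, scale=0.1, zero_point=0,
#                                             quant_min=0, quant_max=255)
#   # y == tensor([0.0, 0.4, 1.1]); still float32, no quantized storage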
- func: fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
- func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
  variants: function
  dispatch:
    CPU, CUDA: fake_quantize_per_tensor_affine_cachemask
  autogen: fake_quantize_per_tensor_affine_cachemask.out
- func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
  variants: function
  dispatch:
    CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams
  autogen: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out
- func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
  variants: function
- func: _fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: _fake_quantize_learnable_per_tensor_affine
  autogen: _fake_quantize_learnable_per_tensor_affine.out
- func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU, CUDA: _fake_quantize_learnable_per_tensor_affine_backward
- func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
- func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
  variants: function
  dispatch:
    CPU, CUDA: fake_quantize_per_channel_affine_cachemask
  autogen: fake_quantize_per_channel_affine_cachemask.out
- func: fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
  variants: function
- func: _fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: _fake_quantize_learnable_per_channel_affine
  autogen: _fake_quantize_learnable_per_channel_affine.out
- func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU, CUDA: _fake_quantize_learnable_per_channel_affine_backward
- func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
  variants: function
- func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
  dispatch:
    CPU: fused_moving_avg_obs_fake_quant_cpu
    CUDA: fused_moving_avg_obs_fake_quant_cuda
  autogen: _fused_moving_avg_obs_fq_helper_functional, _fused_moving_avg_obs_fq_helper.out
- func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
  variants: function
- func: _saturate_weight_to_fp16(Tensor weight) -> Tensor
  variants: function
- func: choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
  variants: function
- func: _autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
  variants: method
  device_guard: False
- func: _autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
  variants: method
  device_guard: False
- func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: _to_copy
    NestedTensorCPU, NestedTensorCUDA: _to_copy_nested
  autogen: _to_copy.out
  tags: core
# to(Device) must not exist because all constructors of Device also work for
# TensorOptions. Otherwise, an ambiguity error is thrown.
# See NOTE [ TensorOptions Constructors ].
- func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
- func: to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
- func: to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
- func: to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
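# Illustrative usage sketch (not part of the generated schema): the to.*
# overloads above back the single Python-level Tensor.to(); the keyword form
# avoids the C++ Device/TensorOptions ambiguity described in the note.
#
#   import torch
#   x = torch.ones(2)
#   x.to(torch.float64)                        # to.dtype
#   x.to(device="cpu", dtype=torch.float16)    # to.dtype_layout (keyword-only)
#   x.to(torch.zeros(1, dtype=torch.int32))    # to.other: match another tensor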
- func: meshgrid(Tensor[] tensors) -> Tensor[]
# TODO: Two weeks after this lands, combine these two overloads,
# making "indexing" optional. These are temporarily distinct for
# forward-compatibility reasons.
- func: meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
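# Illustrative usage sketch (not part of the generated schema): the two
# overloads differ only in whether the keyword-only indexing argument is passed.
#
#   import torch
#   x, y = torch.tensor([1, 2]), torch.tensor([3, 4, 5])
#   gx, gy = torch.meshgrid(x, y, indexing="ij")    # matrix indexing: gx is 2x3
#   gx2, gy2 = torch.meshgrid(x, y, indexing="xy")  # Cartesian indexing: gx2 is 3x2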
- func: cartesian_prod(Tensor[] tensors) -> Tensor
  variants: function
- func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
  variants: function
- func: item(Tensor self) -> Scalar
  tags: data_dependent_output
  variants: method
- func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
  variants: function
- func: result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
  variants: function
- func: result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
  variants: function
- func: result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
- func: can_cast(ScalarType from, ScalarType to) -> bool
  variants: function
- func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType
  variants: function
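# Illustrative usage sketch (not part of the generated schema): the
# type-promotion helpers expose the same rules binary ops apply implicitly.
#
#   import torch
#   torch.result_type(torch.ones(1, dtype=torch.int32), 1.0)  # torch.float32
#   torch.promote_types(torch.int64, torch.float16)           # torch.float16
#   torch.can_cast(torch.float32, torch.int32)                 # False (narrowing)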
# NB: Does NOT check precondition that numel == 1
- func: _local_scalar_dense(Tensor self) -> Scalar
  tags: data_dependent_output
  dispatch:
    CPU: _local_scalar_dense_cpu
    CUDA: _local_scalar_dense_cuda
    MPS: _local_scalar_dense_mps
  variants: function
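# Illustrative usage sketch (not part of the generated schema): item() is the
# checked user-facing path; it verifies numel == 1 before extracting the scalar,
# whereas _local_scalar_dense trusts its caller per the NB above.
#
#   import torch
#   torch.tensor([3.5]).item()    # -> 3.5
#   # torch.ones(2).item()        # raises: only one-element tensors convert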
# MPS LSTM implementation
- func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
  dispatch:
    MPS: _lstm_mps
  autogen: _lstm_mps.out
- func: lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
  dispatch:
    MPS: lstm_mps_backward
  autogen: lstm_mps_backward.out
# Fused RNN kernels
- func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: _thnn_fused_lstm_cell_cuda
  autogen: _thnn_fused_lstm_cell.out
# NB: The composite version of this function below is a simple wrapper that duplicates some of the outputs.
# It is necessary to avoid triggering TensorImpl use count checks in debug mode.
# NB: this function is NOT differentiable
- func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: _thnn_fused_lstm_cell_backward_impl_cuda
  autogen: _thnn_fused_lstm_cell_backward_impl.out
- func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
  dispatch:
    CUDA: _thnn_fused_gru_cell_cuda
  autogen: _thnn_fused_gru_cell.out
- func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
  dispatch:
    CUDA: _thnn_fused_gru_cell_backward_cuda
  autogen: _thnn_fused_gru_cell_backward.out
- func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
# RNN cells and layers
- func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
- func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
- func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
- func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
- func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
- func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
- func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
- func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
# Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp`
# Quantized RNN layers
# - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
# - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
# Quantized GRU layers
# - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
#
# - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
#
# Quantized RNN cells
- func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
- func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- func: quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
# PackedSequence utilities
- func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
  dispatch:
    CompositeExplicitAutograd: _pack_padded_sequence
  autogen: _pack_padded_sequence.out
- func: _pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
  dispatch:
    CompositeImplicitAutograd: _pack_padded_sequence_backward_symint
- func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
# wrappers for legacy TH methods
- func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, Meta, MPS: set_
  autogen: set.source_Storage, set.source_Storage_out
- func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU: set_storage_cpu_
    Meta: set_storage_meta__symint
    CUDA: set_storage_cuda_
    MPS: set_storage_mps_
    QuantizedCPU, QuantizedCUDA: set_storage_quantized_
  autogen: set.source_Storage_storage_offset, set.source_Storage_storage_offset_out
- func: set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: set__symint
- func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, Meta, MPS: set_tensor_
  autogen: set.source_Tensor, set.source_Tensor_out
- func: set_(Tensor(a!) self) -> Tensor(a!)
  variants: method
  dispatch:
    CPU: set_cpu_
    CUDA: set_cuda_
    Meta: set_meta_
    MPS: set_mps_
  autogen: set, set.out
# Not making it CompositeImplicitAutograd because lift
# should be a primitive w.r.t. functorch
# TODO: this should have a view annotation
# TODO: shouldn't be a method
- func: lift(Tensor self) -> Tensor
  dispatch:
    CompositeExplicitAutograd: lift
  autogen: lift.out
# lift_fresh is called with an argument that is guaranteed to be
# fresh (i.e., newly allocated). This is ONLY called from a
# torch.tensor call; if you FX trace a lift_fresh, you are obligated
# to convert this into a lift_fresh_copy (because FX will violate the
# freshness invariant when tracing).
- func: lift_fresh(Tensor(a) self) -> Tensor(a)
  dispatch:
    CompositeExplicitAutograd: lift_fresh
# Like lift, but it clones the input.
- func: lift_fresh_copy(Tensor self) -> Tensor
  tags: view_copy
  dispatch:
    CompositeExplicitAutogradNonFunctional: lift_fresh_copy
  autogen: lift_fresh_copy.out
- func: is_set_to(Tensor self, Tensor tensor) -> bool
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, MPS: is_set_to
- func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU: masked_fill__cpu
    CUDA: masked_fill__cuda
    QuantizedCPU: masked_fill__quantized_cpu
    QuantizedCUDA: masked_fill__quantized_cuda
    MPS: masked_fill__mps
  autogen: masked_fill.Scalar_out
- func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: masked_fill
  tags: pointwise
- func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU: masked_fill__cpu
    CUDA: masked_fill__cuda
    QuantizedCPU: masked_fill__quantized_cpu
    QuantizedCUDA: masked_fill__quantized_cuda
    MPS: masked_fill__mps
  autogen: masked_fill.Tensor_out
- func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: masked_fill
- func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
  variants: method
  dispatch:
    CPU: masked_scatter__cpu
    CUDA: masked_scatter__cuda
  autogen: masked_scatter.out
- func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: masked_scatter
- func: _masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
  dispatch:
    CUDA: masked_softmax_cuda
    CPU: masked_softmax_cpu
  autogen: _masked_softmax.out
- func: _masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
  dispatch:
    CUDA: masked_softmax_backward_cuda
    CPU: masked_softmax_backward_cpu
  autogen: _masked_softmax_backward.out
- func: view(Tensor(a) self, SymInt[] size) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    ZeroTensor, Meta, CPU, CUDA, QuantizedCPU, QuantizedCUDA, MPS: view
    MkldnnCPU: mkldnn_view
    NestedTensorCPU, NestedTensorCUDA: view_nested
  tags: core
# Warning: If you want to change the name or overload name of this
# operator, you might also want to change the `isBlockListedSchema`
# function in `torch/csrc/jit/frontend/schema_matching.cpp`.
# The name and overload name of this operator are hardcoded in that
# function in order to work around a bug:
# https://github.com/pytorch/pytorch/issues/47964
- func: view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: view_dtype
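# Illustrative usage sketch (not part of the generated schema): view(dtype)
# reinterprets the underlying bytes without copying; with equal itemsizes the
# shape is preserved and writes go through to the original tensor.
#
#   import torch
#   f = torch.tensor([1.0], dtype=torch.float32)
#   b = f.view(torch.int32)       # bit pattern of 1.0f -> 1065353216
#   b[0] = 0                      # shared storage: f becomes 0.0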
- func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
  variants: method
  dispatch:
    CPU, CUDA: put_
  autogen: put.out
- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: put
- func: index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  precomputed:
  - dim -> int dim
  dispatch:
    CPU: index_add_cpu_out
    CUDA: index_add_cuda_out
    MPS: index_add_mps_out
- func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
  structured_delegate: index_add.out
  variants: method
- func: index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
  structured_delegate: index_add.out
  variants: function, method
- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
  variants: function, method
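# Illustrative usage sketch (not part of the generated schema): index_add_
# accumulates alpha * source slices into self at the given indices along dim.
#
#   import torch
#   x = torch.zeros(3, 2)
#   src = torch.ones(2, 2)
#   x.index_add_(0, torch.tensor([0, 2]), src, alpha=2.0)
#   # rows 0 and 2 of x are now [2., 2.]; row 1 is untouched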
- func: index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  precomputed:
  - dim -> int dim
  dispatch:
    CPU: index_reduce_cpu_out
    CUDA: index_reduce_cuda_out
- func: index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
  structured_delegate: index_reduce.out
  variants: method
- func: index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
  structured_delegate: index_reduce.out
  variants: function, method
- func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU: index_fill_
    CUDA: index_fill_
  autogen: index_fill.int_Scalar_out
- func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: index_fill
- func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: index_fill_
  autogen: index_fill.int_Tensor_out
- func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: index_fill
- func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
- func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
  structured_delegate: scatter.src_out
  variants: function, method
- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
  structured_delegate: scatter.src_out
  variants: method
- func: scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_src_out
    MPS: scatter_src_out_mps
- func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
  structured_delegate: scatter.value_out
  variants: function, method
- func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
  structured_delegate: scatter.value_out
  variants: method
- func: scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_value_out
    MPS: scatter_value_out_mps
- func: scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
  structured_delegate: scatter.reduce_out
  variants: function, method
- func: scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
  structured_delegate: scatter.reduce_out
  variants: method
- func: scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_reduce_out
    MPS: scatter_reduce_out_mps
- func: scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
  structured_delegate: scatter.value_reduce_out
  variants: function, method
- func: scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
  structured_delegate: scatter.value_reduce_out
  variants: method
- func: scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_value_reduce_out
    MPS: scatter_value_reduce_out_mps
- func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
  variants: function, method
- func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
  variants: function, method
- func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
  structured_delegate: scatter_add.out
  variants: function, method
  tags: core
- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
  structured_delegate: scatter_add.out
  variants: method
- func: scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_add
    MPS: scatter_add_mps_out
- func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
  variants: function, method
- func: scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
  structured_delegate: scatter_reduce.two_out
  variants: function, method
  tags: core
- func: scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
  structured_delegate: scatter_reduce.two_out
  variants: method
- func: scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
  structured: True
  variants: function
  dispatch:
    CPU, CUDA: scatter_reduce_two
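# Illustrative usage sketch (not part of the generated schema): scatter_reduce.two
# applies a named reduction over scattered values; include_self controls whether
# self's existing values take part in the reduction.
#
#   import torch
#   x = torch.zeros(3)
#   src = torch.tensor([1.0, 2.0, 3.0])
#   idx = torch.tensor([0, 0, 2])
#   x.scatter_reduce_(0, idx, src, reduce="sum", include_self=True)
#   # x == tensor([3., 0., 3.])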
- func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: eq.Scalar_out
  device_check: NoCheck # TensorIterator
  variants: method
- func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: eq.Tensor_out
  device_check: NoCheck # TensorIterator
  variants: method
- func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: bitwise_and_out
  tags: pointwise
- func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_and_out
  tags: pointwise
- func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: bitwise_and
  tags: pointwise
- func: bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_and
  autogen: bitwise_and.Scalar_Tensor_out
  tags: pointwise
- func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: bitwise_and.Tensor_out
  tags: [core, pointwise]
- func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise
- func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_and.Tensor_out
  tags: pointwise
- func: __and__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
- func: __and__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
- func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: bitwise_or_out
  tags: pointwise
- func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_or_out
  tags: pointwise
- func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise
- func: bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_or
  autogen: bitwise_or.Scalar_Tensor_out
  tags: pointwise
- func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: bitwise_or.Tensor_out
  tags: [core, pointwise]
- func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise
- func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_or.Tensor_out
  tags: pointwise
- func: __or__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
- func: __or__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
- func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
- func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  variants: function
  dispatch:
    CPU, CUDA: bitwise_xor_out
  tags: pointwise
- func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_xor_out
  tags: pointwise
- func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise
- func: bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_xor
  autogen: bitwise_xor.Scalar_Tensor_out
  tags: pointwise
- func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: bitwise_xor.Tensor_out
  tags: [core, pointwise]
- func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise
- func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_xor.Tensor_out
  tags: pointwise
- func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise
- func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  tags: pointwise
- func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise
- func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: pointwise
- func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __lshift__
  tags: pointwise
- func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __lshift__
  tags: pointwise
- func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __ilshift__
  autogen: __lshift__.Scalar_out
  tags: pointwise
- func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __ilshift__
  autogen: __lshift__.Tensor_out
  tags: pointwise
- func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: bitwise_left_shift.Tensor_out
  tags: pointwise
- func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_left_shift.Tensor_out
  tags: pointwise
- func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: bitwise_left_shift_out
  tags: pointwise
- func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift
  tags: pointwise
- func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift_
  tags: pointwise
- func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift_out
  tags: pointwise
- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_left_shift
  autogen: bitwise_left_shift.Scalar_Tensor_out
  tags: pointwise
- func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __rshift__
  tags: pointwise
- func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: __rshift__
  tags: pointwise
- func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __irshift__
  autogen: __rshift__.Scalar_out
- func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CPU, CUDA: __irshift__
  autogen: __rshift__.Tensor_out
- func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function, method
  structured_delegate: bitwise_right_shift.Tensor_out
  tags: pointwise
- func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: bitwise_right_shift.Tensor_out
  tags: pointwise
- func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: bitwise_right_shift_out
  tags: pointwise
- func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift
  tags: pointwise
- func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift_
  tags: pointwise
- func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift_out
  tags: pointwise
- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: function
  dispatch:
    CompositeExplicitAutograd: bitwise_right_shift
  autogen: bitwise_right_shift.Scalar_Tensor_out
  tags: pointwise
- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
  structured_delegate: tril.out
  variants: method
- func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
  structured_delegate: triu.out
  variants: method
- func: digamma_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: digamma.out
  variants: method
  tags: pointwise
- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: lerp.Scalar_out
  tags: pointwise
- func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: lerp.Tensor_out
  tags: pointwise
- func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  dispatch:
    CPU, CUDA: addbmm_
    MPS: addbmm_mps_
- func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: addbmm_out
    MPS: addbmm_out_mps
- func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: addbmm
    MPS: addbmm_mps
- func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: random_
    Meta: random_meta_
    MPS: random_mps_
  autogen: random.from, random.from_out
- func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: random_
    Meta: random_meta_
    MPS: random_mps_
  autogen: random.to, random.to_out
- func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
  6650. device_check: NoCheck # TensorIterator
  6651. tags: nondeterministic_seeded
  6652. variants: method
  6653. dispatch:
  6654. CPU, CUDA: random_
  6655. Meta: random_meta_
  6656. autogen: random, random.out
  6657. - func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
  6658. device_check: NoCheck # TensorIterator
  6659. tags: nondeterministic_seeded
  6660. variants: method
  6661. dispatch:
  6662. CPU, CUDA: uniform_
  6663. MPS: uniform_mps_
  6664. Meta: uniform_meta_
  6665. autogen: uniform, uniform.out
  6666. - func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
  6667. device_check: NoCheck # TensorIterator
  6668. variants: method
  6669. tags: nondeterministic_seeded
  6670. dispatch:
  6671. CPU, CUDA: cauchy_
  6672. autogen: cauchy, cauchy.out
  6673. - func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
  6674. device_check: NoCheck # TensorIterator
  6675. tags: nondeterministic_seeded
  6676. variants: method
  6677. dispatch:
  6678. CPU, CUDA: log_normal_
  6679. autogen: log_normal, log_normal.out
  6680. - func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
  6681. device_check: NoCheck # TensorIterator
  6682. tags: nondeterministic_seeded
  6683. variants: method
  6684. dispatch:
  6685. CPU, CUDA: exponential_
  6686. MPS: exponential_mps_
  6687. autogen: exponential, exponential.out
- func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: geometric_
  autogen: geometric, geometric.out

# wrappers for TH functions
- func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)

- func: diag(Tensor self, int diagonal=0) -> Tensor
  variants: method, function

- func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)

- func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor
  variants: method, function

- func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: triu_cpu
    CUDA: triu_cuda
    MPS: triu_mps_out

- func: triu(Tensor self, int diagonal=0) -> Tensor
  structured_delegate: triu.out
  variants: method, function

- func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: tril_cpu
    CUDA: tril_cuda
    MPS: tril_mps_out

- func: tril(Tensor self, int diagonal=0) -> Tensor
  structured_delegate: tril.out
  variants: method, function

- func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: tril_indices_cpu
    CUDA: tril_indices_cuda
  autogen: tril_indices.out

- func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CPU: triu_indices_cpu
    CUDA: triu_indices_cuda
  autogen: triu_indices.out

- func: trace(Tensor self) -> Tensor
  variants: method, function
  dispatch:
    CPU: trace_cpu
    CUDA: trace_cuda
    MPS: trace_mps_out
  autogen: trace.out

- func: trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: trace_backward_symint

- func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: ne_Scalar_out
    MPS: ne_scalar_out_mps
    QuantizedCPU: ne_out_quantized_cpu
  tags: pointwise

- func: ne.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: ne.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ne_quantized_cpu
  tags: [core, pointwise]

- func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: ne_Tensor_out
    MPS: ne_tensor_out_mps
    QuantizedCPU: ne_out_quantized_cpu
  tags: pointwise

- func: ne.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: ne.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ne_quantized_cpu
  tags: [core, pointwise]

- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: ne.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method

- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: ne.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method

# not_equal, alias for torch.ne (usage sketch after this group)
- func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

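# A minimal sketch of how such an alias behaves from Python, assuming the
# standard torch bindings generated from these schemas; the same pattern holds
# for the greater_equal/less_equal/greater/less aliases declared below:
#   import torch
#   a, b = torch.tensor([1, 2]), torch.tensor([2, 2])
#   assert torch.equal(torch.not_equal(a, b), torch.ne(a, b))  # alias, same kernel
#   assert torch.equal(a != b, torch.ne(a, b))                 # operator form of ne
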
- func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: eq_Scalar_out
    MPS: eq_scalar_out_mps
    QuantizedCPU: eq_out_quantized_cpu
  tags: pointwise

- func: eq.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: eq.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: eq_quantized_cpu
  tags: [core, pointwise]

- func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: eq_Tensor_out
    MPS: eq_tensor_out_mps
    QuantizedCPU: eq_out_quantized_cpu
  tags: pointwise

- func: eq.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: eq.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: eq_quantized_cpu
  tags: [core, pointwise]

- func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: ge_Scalar_out
    MPS: ge_scalar_out_mps
    QuantizedCPU: ge_out_quantized_cpu
  tags: pointwise

- func: ge.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: ge.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ge_quantized_cpu
  tags: [core, pointwise]

- func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: ge_Tensor_out
    MPS: ge_tensor_out_mps
    QuantizedCPU: ge_out_quantized_cpu
  tags: pointwise

- func: ge.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: ge.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: ge_quantized_cpu
  tags: [core, pointwise]

- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: ge.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method

- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: ge.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method

# greater_equal, alias for torch.ge
- func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: le_Scalar_out
    MPS: le_scalar_out_mps
    QuantizedCPU: le_out_quantized_cpu
  tags: pointwise

- func: le.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: le.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: le_quantized_cpu
  tags: [core, pointwise]

- func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: le_Tensor_out
    MPS: le_tensor_out_mps
    QuantizedCPU: le_out_quantized_cpu
  tags: pointwise

- func: le.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: le.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: le_quantized_cpu
  tags: [core, pointwise]

- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: le.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method

- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: le.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method

# less_equal, alias for torch.le
- func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: gt_Scalar_out
    MPS: gt_scalar_out_mps
    QuantizedCPU: gt_out_quantized_cpu
  tags: pointwise

- func: gt.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: gt.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: gt_quantized_cpu
  tags: [core, pointwise]

- func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: gt_Tensor_out
    MPS: gt_tensor_out_mps
    QuantizedCPU: gt_out_quantized_cpu
  tags: pointwise

- func: gt.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: gt.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: gt_quantized_cpu
  tags: [core, pointwise]

- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: gt.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method

- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: gt.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method

# greater, alias for torch.gt
- func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: greater.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: lt_Scalar_out
    MPS: lt_scalar_out_mps
    QuantizedCPU: lt_out_quantized_cpu
  tags: pointwise

- func: lt.Scalar(Tensor self, Scalar other) -> Tensor
  structured_delegate: lt.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: lt_quantized_cpu
  tags: [core, pointwise]

- func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: lt_Tensor_out
    MPS: lt_tensor_out_mps
    QuantizedCPU: lt_out_quantized_cpu
  tags: pointwise

- func: lt.Tensor(Tensor self, Tensor other) -> Tensor
  structured_delegate: lt.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    QuantizedCPU: lt_quantized_cpu
  tags: [core, pointwise]

- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: lt.Scalar_out
  device_check: NoCheck  # TensorIterator
  variants: method

- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: lt.Tensor_out
  device_check: NoCheck  # TensorIterator
  variants: method

# less, alias for torch.lt
- func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)

- func: less.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function

- func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: less.Tensor(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method

- func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: take_out

- func: take(Tensor self, Tensor index) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: take

- func: take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)

- func: take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
  variants: method, function

- func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, QuantizedCPU: index_select_out_cpu_
    CUDA, QuantizedCUDA: index_select_out_cuda
    MPS: index_select_out_mps

- func: index_select(Tensor self, int dim, Tensor index) -> Tensor
  variants: method, function
  dispatch:
    CPU: index_select_cpu_
    QuantizedCPU: index_select_quantized_cpu_
    CUDA: index_select_cuda
    QuantizedCUDA: index_select_quantized_cuda
    SparseCPU: index_select_sparse_cpu
    SparseCUDA: index_select_sparse_cuda
    MPS: index_select_mps
  tags: core

- func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)

- func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
  variants: method, function

- func: index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeImplicitAutograd: index_select_backward_symint

- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: masked_select_out_cpu
    CUDA: masked_select_out_cuda
    MPS: masked_select_out_mps
  tags: dynamic_output_shape

- func: masked_select(Tensor self, Tensor mask) -> Tensor
  variants: method, function
  dispatch:
    CPU: masked_select_cpu
    CUDA: masked_select_cuda
    MPS: masked_select_mps
  tags: dynamic_output_shape

- func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: nonzero_out_cpu
    CUDA: nonzero_out_cuda
    MPS: nonzero_out_mps
  tags: dynamic_output_shape

- func: nonzero(Tensor self) -> Tensor
  variants: method, function
  dispatch:
    CPU: nonzero_cpu
    CUDA: nonzero_cuda
    MPS: nonzero_mps
  tags: [dynamic_output_shape, core]

- func: nonzero_numpy(Tensor self) -> Tensor[]
  variants: method, function

- func: argwhere(Tensor self) -> Tensor
  variants: method, function
  tags: dynamic_output_shape

- func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU, CUDA: gather_out
    MPS: gather_out_mps

- func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
  variants: method, function
  structured_delegate: gather.out
  tags: core

- func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
  variants: function
  device_check: NoCheck
  device_guard: False

- func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)

- func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
  variants: method, function

- func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor

- func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: addcmul_out
    MPS: addcmul_out_mps
  tags: pointwise

- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
  structured_delegate: addcmul.out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: pointwise

- func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
  structured_delegate: addcmul.out
  device_check: NoCheck  # TensorIterator
  variants: method
  tags: pointwise

- func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: addcdiv_out
    MPS: addcdiv_out_mps
  tags: pointwise

- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
  structured_delegate: addcdiv.out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: pointwise

- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
  structured_delegate: addcdiv.out
  device_check: NoCheck  # TensorIterator
  variants: method
  tags: pointwise

- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: cross_entropy_loss_symint

- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
  structured: True
  dispatch:
    CPU, CUDA: triangular_solve_out
    MPS: triangular_solve_mps_out
    SparseCsrCPU: triangular_solve_out_sparse_csr_cpu
    SparseCsrCUDA: triangular_solve_out_sparse_csr_cuda

- func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
  structured_delegate: triangular_solve.X
  variants: method, function

- func: _linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
  dispatch:
    CompositeExplicitAutograd: _linalg_check_errors

- func: linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  dispatch:
    CPU, CUDA: linalg_solve_triangular_out
    MPS: linalg_solve_triangular_mps_out

- func: linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_solve_triangular
    MPS: linalg_solve_triangular_mps

- func: linalg_vander(Tensor x, *, int? N=None) -> Tensor
  python_module: linalg

- func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)

- func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
  variants: method, function

# swapaxes, alias for transpose
- func: swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view

# swapdims, alias for transpose (usage sketch after this group)
- func: swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
  variants: method
  device_check: NoCheck
  device_guard: False
  tags: inplace_view

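# A minimal sketch (assuming the usual Python bindings): both aliases should
# agree with transpose for any pair of dimensions.
#   import torch
#   x = torch.randn(2, 3, 4)
#   assert torch.equal(x.swapaxes(0, 2), x.transpose(0, 2))
#   assert torch.equal(x.swapdims(0, 2), x.transpose(0, 2))
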
- func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: cholesky_out

- func: cholesky(Tensor self, bool upper=False) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: cholesky

- func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: cholesky_solve_out

- func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: cholesky_solve

- func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
  variants: function
  dispatch:
    CPU: _cholesky_solve_helper_cpu
    CUDA: _cholesky_solve_helper_cuda
  autogen: _cholesky_solve_helper.out

- func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: cholesky_inverse

- func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: cholesky_inverse_out

- func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)

- func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
  variants: method, function

- func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
  dispatch:
    CPU, CUDA: geqrf_out

- func: geqrf(Tensor self) -> (Tensor a, Tensor tau)
  variants: method, function
  dispatch:
    CPU, CUDA: geqrf

# orgqr, alias for linalg_householder_product (usage sketch after this group)
- func: orgqr(Tensor self, Tensor input2) -> Tensor
  variants: method, function

- func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)

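# A minimal sketch (assuming the usual Python bindings): orgqr should match its
# target, torch.linalg.householder_product, on geqrf's output.
#   import torch
#   a = torch.randn(4, 3)
#   reflectors, tau = torch.geqrf(a)
#   assert torch.allclose(torch.orgqr(reflectors, tau),
#                         torch.linalg.householder_product(reflectors, tau))
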
- func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: ormqr_out

- func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: ormqr

- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
  variants: function

- func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)

- func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
  variants: method, function

# lu_unpack (usage sketch after this group)
- func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
  structured_delegate: lu_unpack.out
  variants: function

- func: lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: lu_unpack_out

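# A minimal sketch (assuming the usual Python bindings): unpacking a packed LU
# factorization should reconstruct the original matrix.
#   import torch
#   A = torch.randn(3, 3)
#   LU, pivots = torch.linalg.lu_factor(A)
#   P, L, U = torch.lu_unpack(LU, pivots)
#   assert torch.allclose(P @ L @ U, A, atol=1e-6)
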
# TODO: remove dispatch section when porting TH CUDA to ATen
- func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: multinomial_out
    MPS: multinomial_out_mps

- func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
  variants: method, function
  dispatch:
    CPU, CUDA: multinomial
    MPS: multinomial_mps
  tags: nondeterministic_seeded

- func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lgamma_out
  tags: pointwise

- func: lgamma_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured_delegate: lgamma.out
  variants: method
  tags: pointwise

- func: lgamma(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: lgamma.out
  variants: method, function
  tags: pointwise

- func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: digamma_out
  tags: pointwise

- func: digamma(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: digamma.out
  variants: method, function
  tags: pointwise

- func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: polygamma_out
  tags: pointwise

- func: polygamma(int n, Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: polygamma.out
  variants: method, function
  tags: pointwise

- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: polygamma_
  tags: pointwise

- func: erfinv(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: erfinv.out
  variants: method, function
  dispatch:
    SparseCPU, SparseCUDA: erfinv_sparse
    SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr
  tags: pointwise

- func: erfinv_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured_delegate: erfinv.out
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: erfinv_sparse_
    SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_
  tags: pointwise

- func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: erfinv_out
    SparseCPU, SparseCUDA: erfinv_sparse_out
    SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_out
  tags: pointwise

- func: i0(Tensor self) -> Tensor
  structured_delegate: i0.out
  variants: function, method
  tags: pointwise

- func: i0_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: i0.out
  variants: function, method
  tags: pointwise

- func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: i0_out
  tags: pointwise

- func: sign(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: sign.out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: sign_sparse
    SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr
  tags: [core, pointwise]

- func: sign_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured_delegate: sign.out
  variants: method
  dispatch:
    SparseCPU, SparseCUDA: sign_sparse_
    SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_
  tags: pointwise

- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sign_out
    MPS: sign_out_mps
    SparseCPU, SparseCUDA: sign_sparse_out
    SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_out
  tags: pointwise

- func: signbit(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: signbit.out
  dispatch:
    SparseCPU, SparseCUDA: signbit_sparse
    SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr
  tags: pointwise

- func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU: signbit_out
    CUDA: signbit_out
    MPS: signbit_out_mps
    SparseCPU, SparseCUDA: signbit_sparse_out
    SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr_out
  tags: pointwise

- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: dist
  autogen: dist.out

- func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: atan2_out
    MPS: atan2_mps_out
  tags: pointwise

- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured_delegate: atan2.out
  variants: method
  tags: pointwise

- func: atan2(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: atan2.out
  variants: method, function
  tags: pointwise

# arctan2, alias of atan2
- func: arctan2(Tensor self, Tensor other) -> Tensor
  variants: method, function

- func: arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator

- func: arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method

- func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lerp_Scalar
  tags: pointwise

- func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: lerp_Tensor
  tags: pointwise

- func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  structured_delegate: lerp.Scalar_out
  tags: pointwise

- func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  structured_delegate: lerp.Tensor_out
  tags: pointwise

- func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: histogram_histc_cpu_out
    CUDA: _histc_out_cuda

- func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
  variants: method, function
  dispatch:
    CPU: histogram_histc_cpu
    CUDA: _histc_cuda

- func: histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
  dispatch:
    CPU: histogram_out_cpu

- func: histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
  variants: method, function
  dispatch:
    CPU: histogram_cpu

- func: histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
  dispatch:
    CPU: histogram_out_cpu

- func: histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
  variants: method, function
  dispatch:
    CPU: histogram_cpu

- func: _histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
  dispatch:
    CPU: histogramdd_bin_edges_cpu
  autogen: _histogramdd_bin_edges.out

- func: _histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
  dispatch:
    CPU: histogramdd_cpu
  autogen: _histogramdd_from_bin_cts.out

- func: _histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
  dispatch:
    CPU: histogramdd_cpu
  autogen: _histogramdd_from_bin_tensors.out

- func: histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)

- func: histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)

- func: histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)

- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  dispatch:
    CompositeExplicitAutograd: fmod_out
  tags: pointwise

- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: fmod
  tags: pointwise

- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  variants: method
  dispatch:
    CompositeExplicitAutograd: fmod_
  tags: pointwise

- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: fmod_out
    MPS: fmod_mps_out
  tags: pointwise

- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: fmod.Tensor_out
  variants: method, function
  tags: [core, pointwise]

- func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  variants: method
  structured_delegate: fmod.Tensor_out
  tags: pointwise

- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: hypot_out
  tags: pointwise

- func: hypot(Tensor self, Tensor other) -> Tensor
  structured_delegate: hypot.out
  variants: method, function
  tags: pointwise

- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: hypot.out
  variants: method
  tags: pointwise

- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: igamma_out
  tags: pointwise

- func: igamma(Tensor self, Tensor other) -> Tensor
  structured_delegate: igamma.out
  variants: method, function
  tags: pointwise

- func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: igamma.out
  variants: method
  tags: pointwise

- func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: igammac_out
  tags: pointwise

- func: igammac(Tensor self, Tensor other) -> Tensor
  structured_delegate: igammac.out
  variants: method, function
  tags: pointwise

- func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: igammac.out
  variants: method
  tags: pointwise

- func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: nextafter_out
  tags: pointwise

- func: nextafter(Tensor self, Tensor other) -> Tensor
  structured_delegate: nextafter.out
  variants: method, function
  tags: pointwise

- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
  structured_delegate: nextafter.out
  variants: method
  tags: pointwise

- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: remainder_out
  tags: pointwise

- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: remainder
  tags: pointwise

- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method
  dispatch:
    CompositeExplicitAutograd: remainder_
  tags: pointwise

- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: remainder_out
    MPS: remainder_out_mps
  tags: pointwise

- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: remainder.Tensor_out
  variants: method, function
  tags: [core, pointwise]

- func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured_delegate: remainder.Tensor_out
  variants: method
  tags: pointwise

- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: function
  dispatch:
    CPU, CUDA, MPS: remainder
  autogen: remainder.Scalar_Tensor_out
  tags: pointwise

- func: min(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: min
    MPS: min_mps
    QuantizedCPU: min_quantized_cpu

# Not to be confused with binary op `min.out`. Commented because of failed CI
# FIXME: enable this
#- func: min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
#   device_check: NoCheck  # TensorIterator
#   dispatch:
#     CompositeExplicitAutograd: min_unary_out

- func: fmin(Tensor self, Tensor other) -> Tensor
  structured_delegate: fmin.out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: pointwise

- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: fmin_out
  tags: pointwise

- func: max(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  dispatch:
    CPU, CUDA: max
    MPS: max_mps
    QuantizedCPU: max_quantized_cpu

- func: fmax(Tensor self, Tensor other) -> Tensor
  structured_delegate: fmax.out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: pointwise

- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: fmax_out
  tags: pointwise

- func: maximum(Tensor self, Tensor other) -> Tensor
  structured_delegate: maximum.out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: [core, pointwise]

- func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: maximum_out
    MPS: maximum_out_mps
  tags: pointwise

# binary max, alias of maximum
# NOTE: max is not an alias for maximum, since there is also unary max
# (usage sketch after this group)
- func: max.other(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: pointwise

- func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  tags: pointwise

- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: max_unary_out
    QuantizedCPU: max_quantized_unary_out

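# A minimal sketch of the unary/binary distinction from Python (assuming the
# usual torch bindings):
#   import torch
#   t = torch.tensor([1., 5., 3.])
#   t.max()                            # unary max -> tensor(5.)
#   torch.max(t, torch.zeros_like(t))  # binary max.other, behaves like maximum
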
- func: minimum(Tensor self, Tensor other) -> Tensor
  structured_delegate: minimum.out
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: [core, pointwise]

- func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  dispatch:
    CPU, CUDA: minimum_out
    MPS: minimum_out_mps
  tags: pointwise

# binary min, alias for minimum
# NOTE: min is not an alias for minimum, since there is also unary min
# (usage sketch after this group)
- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  tags: pointwise

- func: min.other(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck  # TensorIterator
  variants: method, function
  tags: pointwise

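# Mirror of the max sketch above, using the same t (assumption: same binding
# pattern):
#   torch.min(t)                       # unary min
#   torch.min(t, torch.zeros_like(t))  # binary min.other, behaves like minimum
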
  7725. - func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  7726. variants: method, function
  7727. - func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
  7728. - func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  7729. variants: method, function
  7730. - func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
  7731. - func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  7732. variants: method, function
  7733. - func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
  7734. - func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
  7735. variants: method, function
  7736. - func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
  7737. - func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  7738. device_check: NoCheck # TensorIterator
  7739. dispatch:
  7740. CompositeExplicitAutograd: sort_out
  7741. - func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  7742. structured: True
  7743. dispatch:
  7744. CPU, CUDA: sort_stable_out
  7745. MPS: sort_stable_out_mps
  7746. - func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
  7747. device_check: NoCheck # TensorIterator
  7748. variants: method, function
  7749. dispatch:
  7750. CompositeExplicitAutograd: sort
  7751. - func: sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
  7752. structured_delegate: sort.values_stable
  7753. variants: method, function
  7754. dispatch:
  7755. QuantizedCPU: sort_quantized_cpu_stable
  7756. - func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  7757. - func: sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  7758. - func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
  7759. variants: method, function
  7760. - func: sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
  7761. variants: method, function
  7762. - func: msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  7763. - func: msort(Tensor self) -> Tensor
  7764. variants: method, function
  7765. - func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
  7766. device_check: NoCheck # TensorIterator
  7767. variants: method, function
  7768. - func: argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
  7769. device_check: NoCheck # TensorIterator
  7770. variants: method, function
  7771. dispatch:
  7772. CPU, CUDA, MPS: argsort_stable
  7773. autogen: argsort.stable_out
  7774. - func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
  7775. variants: method, function
  7776. - func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  7777. structured: True
  7778. dispatch:
  7779. CPU: topk_out_cpu
  7780. CUDA: topk_out_cuda
  7781. MPS: topk_out_mps
  7782. - func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
  7783. variants: method, function
  7784. structured_delegate: topk.values
  7785. dispatch:
  7786. QuantizedCPU: topk_quantized_cpu
  7787. tags: core
- func: all(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: all.all_out
  variants: method, function

- func: all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  structured: True
  dispatch:
    CPU, CUDA: all_all_out
    MPS: all_all_out_mps

- func: any(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: any.all_out
  variants: method, function
  dispatch:
    SparseCPU, SparseCUDA: any_sparse

- func: any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  structured: True
  dispatch:
    CPU, CUDA: any_all_out
    MPS: any_all_out_mps

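# Note (comment only): `structured: True` marks the out= overload as the single
# structured kernel, and `structured_delegate` routes the functional variant
# through it; in Python both reductions collapse all elements, e.g.
#   torch.all(torch.tensor([True, False]))  # -> tensor(False)
#   torch.any(torch.tensor([True, False]))  # -> tensor(True)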
- func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  dispatch:
    CPU, CUDA: renorm_out

- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
  device_check: NoCheck # TensorIterator
  variants: method, function
  structured_delegate: renorm.out

- func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  variants: method
  structured_delegate: renorm.out

- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
  variants: method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CPU, CUDA, Meta, MPS: unfold
    QuantizedCPU, QuantizedCUDA: unfold

- func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: unfold_backward
  autogen: unfold_backward.out

- func: equal(Tensor self, Tensor other) -> bool
  tags: [data_dependent_output, pointwise]
  variants: method, function
  dispatch:
    CPU: cpu_equal
    CUDA: cuda_equal
    MPS: mps_equal
    QuantizedCPU: equal_quantized_cpu

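# Note (comment only): `equal` returns a Python bool rather than a Tensor, which
# is why it carries the data_dependent_output tag (the result depends on tensor
# contents, so it cannot be reasoned about symbolically), e.g.
#   torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))  # -> True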
- func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: pow_Tensor_Tensor_out
    MPS: pow_tensor_tensor_out_mps
  tags: pointwise

- func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Tensor_out
  variants: method, function
  tags: [core, pointwise]

- func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  dispatch:
    CPU, CUDA: pow_Scalar_out
  tags: pointwise

- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Scalar_out
  tags: pointwise

- func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: pow_Tensor_Scalar_out
    SparseCPU, SparseCUDA: pow_out_sparse_scalar
    MPS: pow_tensor_scalar_out_mps
  tags: pointwise

- func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Scalar_out
  variants: function, method
  dispatch:
    SparseCPU, SparseCUDA: pow_sparse_scalar
  tags: [core, pointwise]

- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Scalar_out
  variants: method
  tags: pointwise

- func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured_delegate: pow.Tensor_Tensor_out
  variants: method
  tags: pointwise

- func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
  variants: function, method
  tags: pointwise

- func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
  tags: pointwise

- func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
  tags: pointwise

- func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
  variants: function, method
  tags: pointwise

- func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
  variants: method
  tags: pointwise

- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
  variants: method
  tags: pointwise

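# Note (comment only): float_power differs from pow by always computing in
# double (or complex-double) precision regardless of input dtype, e.g.
#   torch.pow(torch.tensor([2.0]), 3).dtype          # torch.float32
#   torch.float_power(torch.tensor([2.0]), 3).dtype  # torch.float64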
- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  variants: method
  dispatch:
    CPU, CUDA: normal_
    MPS: normal_mps_
    Meta: normal_meta_
    SparseCsrCPU, SparseCsrCUDA: normal_sparse_csr_
  autogen: normal.out

# Only used by the functionalization pass.
# Normally, the codegen would be able to generate a normal() NativeFunction,
# but we can't due to overload ambiguity with normal.Tensor_float.
- func: normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
  tags: nondeterministic_seeded
  dispatch:
    CompositeExplicitAutograd: normal_functional

- func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  tags: nondeterministic_seeded
  dispatch:
    CPU, CUDA: normal_out
    MPS: normal_mps_out
    Meta: normal_out_meta

- func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
  dispatch:
    CPU, CUDA: normal
    MPS: normal_mps
    Meta: normal_meta
  tags: nondeterministic_seeded

- func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: normal_out
    Meta: normal_out_meta
    MPS: normal_mps_out
  tags: nondeterministic_seeded

- func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
  dispatch:
    CPU, CUDA: normal
    MPS: normal_mps
    Meta: normal_meta
  tags: nondeterministic_seeded

- func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: normal_out
    Meta: normal_out_meta
    MPS: normal_mps_out
  tags: nondeterministic_seeded

- func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
  dispatch:
    CPU, CUDA: normal
    MPS: normal_mps
    Meta: normal_meta
  tags: nondeterministic_seeded

- func: normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
    CompositeExplicitAutograd: normal
  tags: nondeterministic_seeded

- func: normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: normal_out
  tags: nondeterministic_seeded

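# Usage sketch (comment only): the overloads above correspond to the accepted
# mean/std combinations in Python; all take an optional generator=, e.g.
#   x.normal_(mean=0.0, std=1.0)           # in-place, normal_
#   torch.normal(mean_t, 1.0)              # Tensor_float
#   torch.normal(0.0, std_t)               # float_Tensor
#   torch.normal(mean_t, std_t)            # Tensor_Tensor
#   torch.normal(0.0, 1.0, size=(2, 3))    # float_float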
- func: alias(Tensor(a) self) -> Tensor(a)
  variants: method, function
  dispatch:
    CompositeExplicitAutograd: alias
  tags: core

- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
  variants: function
  dispatch:
    CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_
  autogen: _amp_foreach_non_finite_check_and_unscale, _amp_foreach_non_finite_check_and_unscale.out

- func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
  variants: function
  dispatch:
    CUDA: _amp_update_scale_cuda_
  autogen: _amp_update_scale, _amp_update_scale.out

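# Note (comment only): these _amp_* kernels back the mixed-precision gradient
# scaler; user code normally reaches them only indirectly, e.g.
#   scaler = torch.cuda.amp.GradScaler()
#   scaler.scale(loss).backward()
#   scaler.step(optimizer)   # unscales grads, checking for infs/nans
#   scaler.update()          # grows/backs off the scale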
#- func: _cat(Tensor[] tensors, int dim=0) -> Tensor
#  dispatch:
#    CPU: _cat_cpu
#    CUDA: cat_cuda
#    MPS: cat_mps
#    QuantizedCPU: cat_quantized_cpu

#- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
#  dispatch:
#    CPU: _cat_out_cpu
#    CUDA: cat_out_cuda
#    QuantizedCPU: cat_out_quantized_cpu

- func: _foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalar_kernel_slow
    CUDA: foreach_tensor_add_scalar_kernel_cuda

- func: _foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalar_kernel_slow_
    CUDA: foreach_tensor_add_scalar_kernel_cuda_
  autogen: _foreach_add.Scalar_out

- func: _foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalar_kernel_slow
    CUDA: foreach_tensor_sub_scalar_kernel_cuda

- func: _foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalar_kernel_slow_
    CUDA: foreach_tensor_sub_scalar_kernel_cuda_
  autogen: _foreach_sub.Scalar_out

- func: _foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalar_kernel_slow
    CUDA: foreach_tensor_mul_scalar_kernel_cuda

- func: _foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalar_kernel_slow_
    CUDA: foreach_tensor_mul_scalar_kernel_cuda_
  autogen: _foreach_mul.Scalar_out

- func: _foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalar_kernel_slow
    CUDA: foreach_tensor_div_scalar_kernel_cuda

- func: _foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalar_kernel_slow_
    CUDA: foreach_tensor_div_scalar_kernel_cuda_
  autogen: _foreach_div.Scalar_out

- func: _foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda

- func: _foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
  autogen: _foreach_clamp_min.Scalar_out

- func: _foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda

- func: _foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
  autogen: _foreach_clamp_max.Scalar_out

# foreach_minimum/maximum dispatches to clamp_max/min
- func: _foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda

- func: _foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
  autogen: _foreach_maximum.Scalar_out

- func: _foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda

- func: _foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
  autogen: _foreach_minimum.Scalar_out

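# Usage sketch (comment only): the .Scalar overloads apply one scalar across a
# whole list of tensors in a single fused call, e.g.
#   outs = torch._foreach_add([a, b, c], 1.0)   # functional, returns new tensors
#   torch._foreach_mul_([a, b, c], 0.5)         # in-place variant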
- func: _foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_list_kernel_slow
    CUDA: foreach_tensor_add_list_kernel_cuda

- func: _foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_list_kernel_slow_
    CUDA: foreach_tensor_add_list_kernel_cuda_
  autogen: _foreach_add.List_out

- func: _foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_list_kernel_slow
    CUDA: foreach_tensor_sub_list_kernel_cuda

- func: _foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_list_kernel_slow_
    CUDA: foreach_tensor_sub_list_kernel_cuda_
  autogen: _foreach_sub.List_out

- func: _foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_list_kernel_slow
    CUDA: foreach_tensor_mul_list_kernel_cuda

- func: _foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_list_kernel_slow_
    CUDA: foreach_tensor_mul_list_kernel_cuda_
  autogen: _foreach_mul.List_out

- func: _foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_list_kernel_slow
    CUDA: foreach_tensor_div_list_kernel_cuda

- func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_list_kernel_slow_
    CUDA: foreach_tensor_div_list_kernel_cuda_
  autogen: _foreach_div.List_out

- func: _foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda

- func: _foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow_
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
  autogen: _foreach_clamp_min.List_out

- func: _foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda

- func: _foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow_
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
  autogen: _foreach_clamp_max.List_out

# foreach_minimum/maximum dispatches to clamp_max/min
- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda

- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_list_kernel_slow_
    CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
  autogen: _foreach_maximum.List_out

- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda

- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_list_kernel_slow_
    CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
  autogen: _foreach_minimum.List_out

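# Usage sketch (comment only): the .List overloads take a second tensor list of
# matching length and pair the tensors elementwise, e.g. (lr is a hypothetical
# learning-rate scalar)
#   torch._foreach_add_(params, grads, alpha=-lr)   # SGD-style fused step
#   maxed = torch._foreach_maximum(xs, ys)          # dispatches to clamp_min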
- func: _foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalarlist_kernel_slow
    CUDA: foreach_tensor_add_scalarlist_kernel_cuda

- func: _foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_add_scalarlist_kernel_slow_
    CUDA: foreach_tensor_add_scalarlist_kernel_cuda_
  autogen: _foreach_add.ScalarList_out

- func: _foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalarlist_kernel_slow
    CUDA: foreach_tensor_sub_scalarlist_kernel_cuda

- func: _foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sub_scalarlist_kernel_slow_
    CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_
  autogen: _foreach_sub.ScalarList_out

- func: _foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalarlist_kernel_slow
    CUDA: foreach_tensor_div_scalarlist_kernel_cuda

- func: _foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_div_scalarlist_kernel_slow_
    CUDA: foreach_tensor_div_scalarlist_kernel_cuda_
  autogen: _foreach_div.ScalarList_out

- func: _foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalarlist_kernel_slow
    CUDA: foreach_tensor_mul_scalarlist_kernel_cuda

- func: _foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_mul_scalarlist_kernel_slow_
    CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_
  autogen: _foreach_mul.ScalarList_out

- func: _foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda

- func: _foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
  autogen: _foreach_clamp_min.ScalarList_out

- func: _foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda

- func: _foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
  autogen: _foreach_clamp_max.ScalarList_out

# foreach_minimum/maximum dispatches to clamp_max/min
- func: _foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda

- func: _foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
  autogen: _foreach_maximum.ScalarList_out

- func: _foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda

- func: _foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
    CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
  autogen: _foreach_minimum.ScalarList_out

- func: _foreach_exp(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_exp_slow
    CUDA: foreach_tensor_exp_cuda

- func: _foreach_zero_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_zero_slow_
    CUDA: foreach_tensor_zero_cuda_
  autogen: _foreach_zero, _foreach_zero.out

- func: _foreach_exp_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_exp_slow_
    CUDA: foreach_tensor_exp_cuda_
  autogen: _foreach_exp.out

- func: _foreach_sqrt(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sqrt_slow
    CUDA: foreach_tensor_sqrt_cuda

- func: _foreach_sqrt_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sqrt_slow_
    CUDA: foreach_tensor_sqrt_cuda_
  autogen: _foreach_sqrt.out

- func: _foreach_abs(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_abs_slow
    CUDA: foreach_tensor_abs_cuda

- func: _foreach_abs_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_abs_slow_
    CUDA: foreach_tensor_abs_cuda_
  autogen: _foreach_abs.out

- func: _foreach_acos(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_acos_slow
    CUDA: foreach_tensor_acos_cuda

- func: _foreach_acos_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_acos_slow_
    CUDA: foreach_tensor_acos_cuda_
  autogen: _foreach_acos.out

- func: _foreach_asin(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_asin_slow
    CUDA: foreach_tensor_asin_cuda

- func: _foreach_asin_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_asin_slow_
    CUDA: foreach_tensor_asin_cuda_
  autogen: _foreach_asin.out

- func: _foreach_atan(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_atan_slow
    CUDA: foreach_tensor_atan_cuda

- func: _foreach_atan_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_atan_slow_
    CUDA: foreach_tensor_atan_cuda_
  autogen: _foreach_atan.out

- func: _foreach_ceil(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ceil_slow
    CUDA: foreach_tensor_ceil_cuda

- func: _foreach_ceil_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ceil_slow_
    CUDA: foreach_tensor_ceil_cuda_
  autogen: _foreach_ceil.out

- func: _foreach_cos(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cos_slow
    CUDA: foreach_tensor_cos_cuda

- func: _foreach_cos_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cos_slow_
    CUDA: foreach_tensor_cos_cuda_
  autogen: _foreach_cos.out

- func: _foreach_cosh(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cosh_slow
    CUDA: foreach_tensor_cosh_cuda

- func: _foreach_cosh_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_cosh_slow_
    CUDA: foreach_tensor_cosh_cuda_
  autogen: _foreach_cosh.out

- func: _foreach_erf(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erf_slow
    CUDA: foreach_tensor_erf_cuda

- func: _foreach_erf_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erf_slow_
    CUDA: foreach_tensor_erf_cuda_
  autogen: _foreach_erf.out

- func: _foreach_erfc(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erfc_slow
    CUDA: foreach_tensor_erfc_cuda

- func: _foreach_erfc_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_erfc_slow_
    CUDA: foreach_tensor_erfc_cuda_
  autogen: _foreach_erfc.out

- func: _foreach_expm1(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_expm1_slow
    CUDA: foreach_tensor_expm1_cuda

- func: _foreach_expm1_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_expm1_slow_
    CUDA: foreach_tensor_expm1_cuda_
  autogen: _foreach_expm1.out

- func: _foreach_floor(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_floor_slow
    CUDA: foreach_tensor_floor_cuda

- func: _foreach_floor_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_floor_slow_
    CUDA: foreach_tensor_floor_cuda_
  autogen: _foreach_floor.out

- func: _foreach_log(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log_slow
    CUDA: foreach_tensor_log_cuda

- func: _foreach_log_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log_slow_
    CUDA: foreach_tensor_log_cuda_
  autogen: _foreach_log.out

- func: _foreach_log10(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log10_slow
    CUDA: foreach_tensor_log10_cuda

- func: _foreach_log10_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log10_slow_
    CUDA: foreach_tensor_log10_cuda_
  autogen: _foreach_log10.out

- func: _foreach_log1p(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log1p_slow
    CUDA: foreach_tensor_log1p_cuda

- func: _foreach_log1p_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log1p_slow_
    CUDA: foreach_tensor_log1p_cuda_
  autogen: _foreach_log1p.out

- func: _foreach_log2(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log2_slow
    CUDA: foreach_tensor_log2_cuda

- func: _foreach_log2_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_log2_slow_
    CUDA: foreach_tensor_log2_cuda_
  autogen: _foreach_log2.out

- func: _foreach_neg(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_neg_slow
    CUDA: foreach_tensor_neg_cuda

- func: _foreach_neg_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_neg_slow_
    CUDA: foreach_tensor_neg_cuda_
  autogen: _foreach_neg.out

- func: _foreach_tan(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tan_slow
    CUDA: foreach_tensor_tan_cuda

- func: _foreach_tan_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tan_slow_
    CUDA: foreach_tensor_tan_cuda_
  autogen: _foreach_tan.out

- func: _foreach_tanh(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tanh_slow
    CUDA: foreach_tensor_tanh_cuda

- func: _foreach_tanh_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_tanh_slow_
    CUDA: foreach_tensor_tanh_cuda_
  autogen: _foreach_tanh.out

- func: _foreach_sin(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sin_slow
    CUDA: foreach_tensor_sin_cuda

- func: _foreach_sin_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sin_slow_
    CUDA: foreach_tensor_sin_cuda_
  autogen: _foreach_sin.out

- func: _foreach_sinh(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sinh_slow
    CUDA: foreach_tensor_sinh_cuda

- func: _foreach_sinh_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sinh_slow_
    CUDA: foreach_tensor_sinh_cuda_
  autogen: _foreach_sinh.out

- func: _foreach_round(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_round_slow
    CUDA: foreach_tensor_round_cuda

- func: _foreach_round_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_round_slow_
    CUDA: foreach_tensor_round_cuda_
  autogen: _foreach_round.out

- func: _foreach_lgamma(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lgamma_slow
    CUDA: foreach_tensor_lgamma_cuda

- func: _foreach_lgamma_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lgamma_slow_
    CUDA: foreach_tensor_lgamma_cuda_
  autogen: _foreach_lgamma.out

- func: _foreach_frac(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_frac_slow
    CUDA: foreach_tensor_frac_cuda

- func: _foreach_frac_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_frac_slow_
    CUDA: foreach_tensor_frac_cuda_
  autogen: _foreach_frac.out

- func: _foreach_reciprocal(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_reciprocal_slow
    CUDA: foreach_tensor_reciprocal_cuda

- func: _foreach_reciprocal_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_reciprocal_slow_
    CUDA: foreach_tensor_reciprocal_cuda_
  autogen: _foreach_reciprocal.out

- func: _foreach_sigmoid(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sigmoid_slow
    CUDA: foreach_tensor_sigmoid_cuda

- func: _foreach_sigmoid_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_sigmoid_slow_
    CUDA: foreach_tensor_sigmoid_cuda_
  autogen: _foreach_sigmoid.out

- func: _foreach_trunc(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_trunc_slow
    CUDA: foreach_tensor_trunc_cuda

- func: _foreach_trunc_(Tensor(a!)[] self) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_trunc_slow_
    CUDA: foreach_tensor_trunc_cuda_
  autogen: _foreach_trunc.out

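# Usage sketch (comment only): every unary op above follows the same pattern, a
# functional form returning new tensors and an in-place form suffixed with _:
#   roots = torch._foreach_sqrt(tensors)
#   torch._foreach_zero_(grads)   # the kind of call foreach zero_grad paths use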
- func: _foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalar_slow_
    CUDA: foreach_tensor_addcdiv_scalar_cuda_
  autogen: _foreach_addcdiv.Scalar_out

- func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalar_slow_
    CUDA: foreach_tensor_addcmul_scalar_cuda_
  autogen: _foreach_addcmul.Scalar_out

- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalarlist_slow_
    CUDA: foreach_tensor_addcdiv_scalarlist_cuda_
  autogen: _foreach_addcdiv.ScalarList_out

- func: _foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_tensor_slow_
    CUDA: foreach_tensor_addcdiv_tensor_cuda_
  autogen: _foreach_addcdiv.Tensor_out

- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalarlist_slow_
    CUDA: foreach_tensor_addcmul_scalarlist_cuda_
  autogen: _foreach_addcmul.ScalarList_out

- func: _foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_tensor_slow_
    CUDA: foreach_tensor_addcmul_tensor_cuda_
  autogen: _foreach_addcmul.Tensor_out

- func: _foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalar_slow
    CUDA: foreach_tensor_addcdiv_scalar_cuda

- func: _foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalar_slow
    CUDA: foreach_tensor_addcmul_scalar_cuda

- func: _foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_scalarlist_slow
    CUDA: foreach_tensor_addcdiv_scalarlist_cuda

- func: _foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcdiv_tensor_slow
    CUDA: foreach_tensor_addcdiv_tensor_cuda

- func: _foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_scalarlist_slow
    CUDA: foreach_tensor_addcmul_scalarlist_cuda

- func: _foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_addcmul_tensor_slow
    CUDA: foreach_tensor_addcmul_tensor_cuda

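# Usage sketch (comment only): fused pointwise self + value * tensor1 {*,/} tensor2
# over tensor lists; foreach optimizer implementations (e.g. Adam's second-moment
# update) lean on these, along the lines of (beta2 is a hypothetical scalar):
#   torch._foreach_addcmul_(exp_avg_sqs, grads, grads, value=1 - beta2)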
- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_norm_slow
    CUDA: foreach_tensor_norm_cuda
  autogen: _foreach_norm.Scalar_out

- func: _foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ternary_lerp_slow
    CUDA: foreach_tensor_lerp_ternary_cuda
  autogen: _foreach_lerp.List_out

- func: _foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_ternary_lerp_slow_
    CUDA: foreach_tensor_lerp_ternary_cuda_
  autogen: _foreach_lerp.List_out

- func: _foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lerp_list_kernel_slow
    CUDA: foreach_tensor_lerp_list_cuda
  autogen: _foreach_lerp.Scalar_out

- func: _foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
  variants: function
  dispatch:
    CPU: foreach_tensor_lerp_list_kernel_slow_
    CUDA: foreach_tensor_lerp_list_cuda_
  autogen: _foreach_lerp.Scalar_out

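# Usage sketch (comment only):
#   norms = torch._foreach_norm(grads, ord=2)   # per-tensor norms, the kind of
#                                               # primitive clip_grad_norm_-style code uses
#   torch._foreach_lerp_(avgs, params, 0.01)    # in-place lerp with a scalar weight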
- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
  dispatch:
    CPU: bucketize_cpu
    CUDA: bucketize_cuda

- func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: bucketize_out_cpu
    CUDA: bucketize_out_cuda

- func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
  dispatch:
    CPU: bucketize_cpu
    CUDA: bucketize_cuda
  autogen: bucketize.Scalar_out

- func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
  dispatch:
    CPU: searchsorted_cpu
    CUDA: searchsorted_cuda

- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU: searchsorted_out_cpu
    CUDA: searchsorted_out_cuda

- func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
  dispatch:
    CPU: searchsorted_cpu
    CUDA: searchsorted_cuda
  autogen: searchsorted.Scalar_out

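# Usage sketch (comment only): note the mirrored argument order between the two,
# and that out_int32=True yields int32 instead of the default int64 indices:
#   torch.searchsorted(sorted_seq, values, right=False)  # sorted sequence first
#   torch.bucketize(values, boundaries)                  # values first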
- func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
  structured_delegate: _convert_indices_from_coo_to_csr.out

- func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: _convert_indices_from_coo_to_csr_structured_cpu
    CUDA: _convert_indices_from_coo_to_csr_structured_cuda

- func: _convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
  structured_delegate: _convert_indices_from_csr_to_coo.out

- func: _convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  dispatch:
    CPU: _convert_indices_from_csr_to_coo_structured_cpu
    CUDA: _convert_indices_from_csr_to_coo_structured_cuda

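# Note (comment only): internal helpers used by sparse layout conversions such
# as Tensor.to_sparse_csr(); coo->csr compresses sorted row indices into the
# crow_indices array, e.g.
#   csr = x.to_sparse_csr()   # exposes crow_indices()/col_indices()/values()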
## NN wrappers

- func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: mse_loss_out
    MPS: mse_loss_out_mps

- func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  device_check: NoCheck # TensorIterator
  structured_delegate: mse_loss.out
  python_module: nn

- func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: mse_loss_backward_out
    MPS: mse_loss_backward_out_mps

- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: mse_loss_backward
    MPS: mse_loss_backward_mps

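# Note (comment only): python_module: nn binds these under torch._C._nn; the
# public surface is torch.nn.functional, where the int reduction=Mean default
# corresponds to the string 'mean', e.g.
#   loss = torch.nn.functional.mse_loss(input, target, reduction='mean')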
- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  python_module: nn

- func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu_out
    CUDA: multi_margin_loss_cuda_out

- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu
    CUDA: multi_margin_loss_cuda

- func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu_backward_out
    CUDA: multi_margin_loss_cuda_backward_out

- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
  python_module: nn
  dispatch:
    CPU: multi_margin_loss_cpu_backward
    CUDA: multi_margin_loss_cuda_backward

- func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  python_module: nn

- func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_forward_out_cpu
    CUDA: multilabel_margin_loss_forward_out_cuda

- func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_forward_cpu
    CUDA: multilabel_margin_loss_forward_cuda

- func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_backward_cpu_out
    CUDA: multilabel_margin_loss_backward_cuda_out

- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
  python_module: nn
  dispatch:
    CPU: multilabel_margin_loss_backward_cpu
    CUDA: multilabel_margin_loss_backward_cuda

- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: nll_loss_nd_symint

- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: nll_loss_symint
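
# `CompositeImplicitAutograd` entries such as nll_loss are implemented purely in
# terms of other ATen ops (here they route to nll_loss_forward below), so they
# need no per-backend kernel and no explicit derivative formula. A hedged sketch
# of the Python-level entry point (illustrative shapes only):
#
#   import torch
#   import torch.nn.functional as F
#   logits = torch.randn(8, 5).log_softmax(dim=1)
#   target = torch.randint(0, 5, (8,))
#   loss = F.nll_loss(logits, target, ignore_index=-100)  # -> aten::nll_loss_forward
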
- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: nll_loss_forward_out_cpu
    CUDA: nll_loss_forward_out_cuda
    MPS: nll_loss_forward_out_mps

- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
  python_module: nn
  structured_delegate: nll_loss_forward.output

- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: nll_loss_backward_out_cpu
    CUDA: nll_loss_backward_out_cuda
    MPS: nll_loss_backward_out_mps

- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
  python_module: nn
  structured_delegate: nll_loss_backward.grad_input

- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: nll_loss2d_symint

- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CPU: nll_loss2d_forward_out_cpu
    CUDA: nll_loss2d_forward_out_cuda
    MPS: nll_loss2d_forward_out_mps

- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
  python_module: nn
  dispatch:
    CPU: nll_loss2d_forward_cpu
    CUDA: nll_loss2d_forward_cuda
    MPS: nll_loss2d_forward_mps

- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: nll_loss2d_backward_out_cpu
    CUDA: nll_loss2d_backward_out_cuda
    MPS: nll_loss2d_backward_out_mps

- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
  python_module: nn
  dispatch:
    CPU: nll_loss2d_backward_cpu
    CUDA: nll_loss2d_backward_cuda
    MPS: nll_loss2d_backward_mps

- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: smooth_l1_loss_out
    MPS: smooth_l1_loss_out_mps

- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
  device_check: NoCheck  # TensorIterator
  structured_delegate: smooth_l1_loss.out
  python_module: nn

- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: smooth_l1_loss_backward_out
    CUDA: smooth_l1_loss_backward_out
    MPS: smooth_l1_loss_backward_out_mps

- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: smooth_l1_loss_backward

- func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: huber_loss_out
    MPS: huber_loss_out_mps

- func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: huber_loss
    MPS: huber_loss_mps

- func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: huber_loss_backward_out
    MPS: huber_loss_backward_out_mps

- func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: huber_loss_backward

- func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss_out

- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss

- func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss_backward_out

- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: soft_margin_loss_backward
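
# `CompositeExplicitAutograd` (soft_margin_loss and friends) also means a single
# kernel written against other ATen ops and shared by all backends, but it is
# registered below autograd, so its derivative is declared separately (the
# _backward schemas here, wired up in derivatives.yaml). Hedged usage sketch:
#
#   import torch
#   import torch.nn.functional as F
#   loss = F.soft_margin_loss(torch.randn(3), torch.tensor([1., -1., 1.]))
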
- func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: elu_out
    MPS: elu_out_mps

- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
  structured_delegate: elu.out
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: elu_backward_out
    MPS: elu_backward_out_mps

- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
  structured_delegate: elu_backward.grad_input
  python_module: nn

- func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
  structured_delegate: elu.out
  device_check: NoCheck  # TensorIterator
  python_module: nn
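
# Note that the in-place variant elu_ shares the same structured kernel as elu
# via `structured_delegate: elu.out`, so all three schemas (functional, out=,
# in-place) are served by one implementation per backend. Sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(5)
#   y = F.elu(x, alpha=1.0)   # functional -> aten::elu
#   F.elu_(x, alpha=1.0)      # in-place   -> aten::elu_
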
- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: glu_out
    MPS: glu_out_mps

- func: glu(Tensor self, int dim=-1) -> Tensor
  structured_delegate: glu.out
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: glu_backward_cpu_out
    CUDA: glu_backward_cuda_out
    MPS: glu_backward_mps_out

- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
  python_module: nn
  dispatch:
    CPU: glu_backward_cpu
    CUDA: glu_backward_cuda
    MPS: glu_backward_mps

- func: glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: glu_jvp
  autogen: glu_jvp.out

- func: glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: glu_backward_jvp
  autogen: glu_backward_jvp.out

- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardsigmoid_out
    QuantizedCPU: hardsigmoid_out_quantized_cpu

- func: hardsigmoid(Tensor self) -> Tensor
  structured_delegate: hardsigmoid.out
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    QuantizedCPU: hardsigmoid_quantized_cpu

- func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: hardsigmoid.out
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: hardsigmoid_backward_out

- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
  structured_delegate: hardsigmoid_backward.grad_input
  python_module: nn

- func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA, MPS: hardtanh_out
    QuantizedCPU: hardtanh_out_quantized_cpu

- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA, MPS: hardtanh
    QuantizedCPU: hardtanh_quantized_cpu
  tags: core
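
# Ops that list a QuantizedCPU dispatch key (hardsigmoid, hardtanh, leaky_relu,
# ...) accept quantized tensors directly. A hedged sketch, assuming the build
# includes quantized CPU support:
#
#   import torch
#   import torch.nn.functional as F
#   qx = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=0,
#                                  dtype=torch.quint8)
#   qy = F.hardtanh(qx)  # routes to hardtanh_quantized_cpu
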
- func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, CUDA: hardtanh_backward_out
    MPS: hardtanh_backward_out_mps

- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: hardtanh_backward
    MPS: hardtanh_backward_mps

- func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA, MPS: hardtanh_
    QuantizedCPU: hardtanh_quantized_cpu_

- func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish_out
    MPS: hardswish_out_mps

- func: hardswish(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish
    MPS: hardswish_mps

- func: hardswish_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish_
    MPS: hardswish_mps_

- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU, CUDA: hardswish_backward
    MPS: hardswish_backward_mps
  autogen: hardswish_backward.out

- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: leaky_relu_out
    MPS: leaky_relu_out_mps
    QuantizedCPU: leaky_relu_out_quantized_cpu

- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
  structured_delegate: leaky_relu.out
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    QuantizedCPU: leaky_relu_quantized_cpu
  tags: core

- func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: leaky_relu_backward_out
    MPS: leaky_relu_backward_out_mps

- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
  structured_delegate: leaky_relu_backward.grad_input
  python_module: nn

- func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
  structured_delegate: leaky_relu.out
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    QuantizedCPU: leaky_relu_quantized_cpu_

- func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: log_sigmoid(Tensor self) -> Tensor
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU: log_sigmoid_forward_out_cpu
    CUDA: log_sigmoid_forward_out_cuda

- func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU: log_sigmoid_forward_cpu
    CUDA: log_sigmoid_forward_cuda

- func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: log_sigmoid_backward_cpu_out
    CUDA: log_sigmoid_backward_cuda_out

- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
  python_module: nn
  dispatch:
    CPU: log_sigmoid_backward_cpu
    CUDA: log_sigmoid_backward_cuda

- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  tags: nondeterministic_seeded
  dispatch:
    CPU: rrelu_with_noise_out_cpu
    CUDA: rrelu_with_noise_out_cuda

- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
  python_module: nn
  dispatch:
    CPU: rrelu_with_noise_cpu
    CUDA: rrelu_with_noise_cuda
  tags: nondeterministic_seeded

- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: rrelu_with_noise_backward
  autogen: rrelu_with_noise_backward.out

- func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
  python_module: nn
  tags: nondeterministic_seeded
  dispatch:
    CPU: rrelu_with_noise_cpu_
    CUDA: rrelu_with_noise_cuda_
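
# `tags: nondeterministic_seeded` marks ops whose result depends on RNG state;
# they accept an optional Generator so runs can be reproduced. The usual Python
# entry point is F.rrelu, which samples the noise internally. Sketch
# (illustrative only):
#
#   import torch
#   import torch.nn.functional as F
#   torch.manual_seed(0)
#   y = F.rrelu(torch.randn(5), lower=0.125, upper=1/3, training=True)
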
- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: softplus_out
    MPS: softplus_out_mps

- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
  structured_delegate: softplus.out
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: softplus_backward_out
    MPS: softplus_backward_out_mps

- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
  structured_delegate: softplus_backward.grad_input
  python_module: nn

- func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  device_check: NoCheck  # TensorIterator
  python_module: nn
  dispatch:
    CPU, CUDA: softshrink_out

- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
  structured_delegate: softshrink.out
  device_check: NoCheck  # TensorIterator
  python_module: nn

- func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: nn
  dispatch:
    CPU, CUDA: softshrink_backward_out

- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
  structured_delegate: softshrink_backward.grad_input
  python_module: nn

- func: adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool2d_out_cpu
    CUDA: adaptive_avg_pool2d_out_cuda
    MPS: adaptive_avg_pool2d_out_mps
    MkldnnCPU: mkldnn_adaptive_avg_pool2d_out_stub

- func: adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: adaptive_avg_pool2d_symint

- func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_adaptive_avg_pool2d

- func: mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    MkldnnCPU: mkldnn_adaptive_avg_pool2d_out

- func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
  dispatch:
    MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward
  autogen: mkldnn_adaptive_avg_pool2d_backward.out

- func: _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
  dispatch:
    CPU: adaptive_avg_pool2d_cpu
    CUDA: adaptive_avg_pool2d_cuda
    MPS: adaptive_avg_pool2d_mps
    QuantizedCPU: adaptive_avg_pool2d_quantized_cpu
    QuantizedCUDA: adaptive_avg_pool2d_quantized_cuda
  autogen: _adaptive_avg_pool2d.out
  tags: core

- func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool2d_backward_cpu
    CUDA: adaptive_avg_pool2d_backward_cuda
    MPS: adaptive_avg_pool2d_backward_mps
  autogen: _adaptive_avg_pool2d_backward.out
  tags: core
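
# The public adaptive_avg_pool2d is CompositeImplicitAutograd and decomposes to
# the underscore-prefixed _adaptive_avg_pool2d, which owns the per-backend
# kernels and the `core` tag. Usage sketch (illustrative shapes only):
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 17, 23)
#   y = F.adaptive_avg_pool2d(x, (5, 7))  # y.shape == (1, 3, 5, 7)
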
- func: adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool3d_out_cpu
    CUDA: adaptive_avg_pool3d_out_cuda
    QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu

- func: adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: adaptive_avg_pool3d_symint

- func: _adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
  dispatch:
    CPU: adaptive_avg_pool3d_cpu
    CUDA: adaptive_avg_pool3d_cuda
    QuantizedCPU: adaptive_avg_pool3d_quantized_cpu
  autogen: _adaptive_avg_pool3d.out

- func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool3d_backward_out_cpu
    CUDA: adaptive_avg_pool3d_backward_out_cuda

- func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CPU: adaptive_avg_pool3d_backward_cpu
    CUDA: adaptive_avg_pool3d_backward_cuda
  autogen: _adaptive_avg_pool3d_backward.out

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool2d_out_cpu
    CUDA: adaptive_max_pool2d_out_cuda
    MPS: adaptive_max_pool2d_out_mps

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: adaptive_max_pool2d.out

- func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool2d_backward_out_cpu
    CUDA: adaptive_max_pool2d_backward_out_cuda
    MPS: adaptive_max_pool2d_backward_out_mps

- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: adaptive_max_pool2d_backward.grad_input

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool3d_out_cpu
    CUDA: adaptive_max_pool3d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: adaptive_max_pool3d.out

- func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: adaptive_max_pool3d_backward_out_cpu
    CUDA: adaptive_max_pool3d_backward_out_cuda

- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: adaptive_max_pool3d_backward.grad_input

- func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  precomputed:
  - kernel_size -> int kH, int kW
  - stride -> int dH, int dW
  - padding -> int padH, int padW
  dispatch:
    CPU: avg_pool2d_out_cpu
    CUDA: avg_pool2d_out_cuda
    MPS: avg_pool2d_out_mps
    MkldnnCPU: mkldnn_avg_pool2d_out
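
# The `precomputed:` block above declares values that the structured kernel's
# meta function derives once from the arguments (e.g. kernel_size -> kH, kW)
# and passes to the impl functions, so each backend avoids re-parsing the int
# lists. Usage of the op itself is unaffected; sketch:
#
#   import torch
#   import torch.nn.functional as F
#   y = F.avg_pool2d(torch.randn(1, 1, 8, 8), kernel_size=2)  # -> (1, 1, 4, 4)
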
- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
  python_module: nn
  structured_delegate: avg_pool2d.out
  dispatch:
    MkldnnCPU: mkldnn_avg_pool2d
    QuantizedCPU: avg_pool2d_quantized_cpu
  tags: core

- func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: avg_pool2d_backward_out_cpu
    CUDA: avg_pool2d_backward_out_cuda
    MPS: avg_pool2d_backward_out_mps
    MkldnnCPU: mkldnn_avg_pool2d_backward_out

- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
  python_module: nn
  structured_delegate: avg_pool2d_backward.grad_input
  dispatch:
    MkldnnCPU: mkldnn_avg_pool2d_backward
  tags: core

- func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: avg_pool3d_out_cpu
    CUDA: avg_pool3d_out_cuda
    MkldnnCPU: mkldnn_avg_pool3d_out

- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
  python_module: nn
  structured_delegate: avg_pool3d.out
  dispatch:
    MkldnnCPU: mkldnn_avg_pool3d
    QuantizedCPU: avg_pool3d_quantized_cpu

- func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: avg_pool3d_backward_out_cpu
    CUDA: avg_pool3d_backward_out_cuda
    MkldnnCPU: mkldnn_avg_pool3d_backward_out

- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
  python_module: nn
  structured_delegate: avg_pool3d_backward.grad_input
  dispatch:
    MkldnnCPU: mkldnn_avg_pool3d_backward

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: fractional_max_pool2d_out_cpu
    CUDA: fractional_max_pool2d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: fractional_max_pool2d.output

- func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: fractional_max_pool2d_backward_cpu
    CUDA: fractional_max_pool2d_backward_cuda

- func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: fractional_max_pool2d_backward.grad_input

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  precomputed:
  - kernel_size -> int poolSizeT, int poolSizeH, int poolSizeW
  - output_size -> int outputT, int outputH, int outputW
  - int numBatch, int numPlanes, int inputT, int inputH, int inputW
  dispatch:
    CPU: fractional_max_pool3d_out_cpu
    CUDA: fractional_max_pool3d_out_cuda

# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: fractional_max_pool3d.output

- func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: fractional_max_pool3d_backward_out_cpu
    CUDA: fractional_max_pool3d_backward_out_cuda

- func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
  python_module: nn
  dispatch:
    CPU: fractional_max_pool3d_backward_cpu
    CUDA: fractional_max_pool3d_backward_cuda

# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  structured: True
  dispatch:
    CPU: max_pool2d_with_indices_out_cpu
    CUDA: max_pool2d_with_indices_out_cuda
    MPS: max_pool2d_with_indices_out_mps

# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
  python_module: nn
  structured_delegate: max_pool2d_with_indices.out
  tags: core
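
# max_pool2d_with_indices is the kernel behind F.max_pool2d when indices are
# requested; the indices feed the backward pass and max_unpool2d below. Sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 1, 4, 4)
#   y, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)
#   x_up = F.max_unpool2d(y, idx, kernel_size=2)  # scatters back to (1, 1, 4, 4)
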
- func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: max_pool2d_with_indices_backward_out_cpu
    CUDA: max_pool2d_with_indices_backward_out_cuda
    MPS: max_pool2d_with_indices_backward_out_mps

- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
  python_module: nn
  structured_delegate: max_pool2d_with_indices_backward.grad_input
  tags: core

# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_out_cpu
    CUDA: max_pool3d_with_indices_out_cuda

# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_cpu
    CUDA: max_pool3d_with_indices_cuda
  tags: core

- func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_backward_out_cpu
    CUDA: max_pool3d_with_indices_backward_out_cuda

- func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
  python_module: nn
  dispatch:
    CPU: max_pool3d_with_indices_backward_cpu
    CUDA: max_pool3d_with_indices_backward_cuda

- func: max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: max_unpooling2d_forward_out_cpu
    CUDA: max_unpooling2d_forward_out_cuda

- func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
  python_module: nn
  dispatch:
    CPU: max_unpooling2d_forward_cpu
    CUDA: max_unpooling2d_forward_cuda

- func: max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: max_unpooling3d_forward_out_cpu
    CUDA: max_unpooling3d_forward_out_cuda

- func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: max_unpooling3d_forward_cpu
    CUDA: max_unpooling3d_forward_cuda

- func: reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad1d_out_cpu
    QuantizedCPU: reflection_pad1d_out_quantized_cpu
    CUDA: reflection_pad1d_out_cuda
    MPS: reflection_pad1d_out_mps

- func: reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad1d.out

- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad1d_backward_out_cpu
    CUDA: reflection_pad1d_backward_out_cuda
    MPS: reflection_pad1d_backward_out_mps

- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad1d_backward.grad_input

- func: reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU, QuantizedCPU: reflection_pad2d_out_cpu
    CUDA: reflection_pad2d_out_cuda
    MPS: reflection_pad2d_out_mps

- func: reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: reflection_pad2d_cpu
    QuantizedCPU: reflection_pad2d_quantized_cpu
    CUDA: reflection_pad2d_cuda
    MPS: reflection_pad2d_mps
  tags: core

- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: reflection_pad2d_backward_out_cpu
    CUDA: reflection_pad2d_backward_out_cuda
    MPS: reflection_pad2d_backward_out_mps

- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: reflection_pad2d_backward_cpu
    CUDA: reflection_pad2d_backward_cuda
    MPS: reflection_pad2d_backward_mps

- func: reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad3d_out_cpu
    CUDA: reflection_pad3d_out_cuda
    MPS: reflection_pad3d_out_mps

- func: reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad3d.out

- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: reflection_pad3d_backward_out_cpu
    CUDA: reflection_pad3d_backward_out_cuda
    MPS: reflection_pad3d_backward_out_mps

- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  structured_delegate: reflection_pad3d_backward.grad_input

- func: replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad1d_out_cpu
    CUDA: replication_pad1d_out_cuda
    MPS: replication_pad1d_out_mps

- func: replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad1d.out

- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad1d_backward_out_cpu
    CUDA: replication_pad1d_backward_out_cuda
    MPS: replication_pad1d_backward_out_mps

- func: replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad1d_backward.grad_input

- func: replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad2d_out_cpu
    CUDA: replication_pad2d_out_cuda
    MPS: replication_pad2d_out_mps

- func: replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad2d.out
  tags: core

- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: replication_pad2d_backward_out_cpu
    CUDA: replication_pad2d_backward_out_cuda
    MPS: replication_pad2d_backward_out_mps

- func: replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: replication_pad2d_backward_cpu
    CUDA: replication_pad2d_backward_cuda
    MPS: replication_pad2d_backward_mps

- func: replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: replication_pad3d_out_cpu
    CUDA: replication_pad3d_out_cuda
    MPS: replication_pad3d_out_mps

- func: replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  structured_delegate: replication_pad3d.out
  tags: core

- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: replication_pad3d_backward_out_cpu
    CUDA: replication_pad3d_backward_out_cuda
    MPS: replication_pad3d_backward_out_mps

- func: replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: replication_pad3d_backward_cpu
    CUDA: replication_pad3d_backward_cuda
    MPS: replication_pad3d_backward_mps

- func: _pad_circular(Tensor self, SymInt[] pad) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: _pad_circular_symint

- func: _pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: _pad_enum_symint

- func: pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
  python_module: nn
  dispatch:
    CompositeImplicitAutograd: pad_symint
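
# F.pad is the usual Python entry point; the `pad` schema maps the mode string
# onto these CompositeImplicitAutograd decompositions, which in turn lower to
# constant padding or the reflection/replication/circular pad ops above
# (routing described here is a best-effort reading of the decomposition).
# Sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 1, 4, 4)
#   F.pad(x, (1, 1, 1, 1), mode='constant', value=0.0)
#   F.pad(x, (1, 1, 1, 1), mode='reflect')   # -> aten::reflection_pad2d
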
- func: upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_linear1d.vec_out

- func: upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_bilinear2d.vec_out
  tags: core

- func: _upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_bilinear2d_aa.vec_out

- func: upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_trilinear3d.vec_out

- func: upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_bicubic2d.vec_out

- func: _upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_bicubic2d_aa.vec_out

- func: upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_nearest1d.vec_out

- func: _upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_nearest_exact1d.vec_out

- func: upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_nearest2d.vec_out
  tags: core

- func: _upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_nearest_exact2d.vec_out

- func: upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: upsample_nearest3d.vec_out

- func: _upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
  python_module: nn
  autogen: _upsample_nearest_exact3d.vec_out
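
# The `.vec` overloads take either an explicit output_size or per-dimension
# scale_factors and back torch.nn.functional.interpolate. Sketch (illustrative
# shapes only):
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 8, 8)
#   F.interpolate(x, scale_factor=2, mode='nearest')  # -> upsample_nearest2d.vec
#   F.interpolate(x, size=(16, 16), mode='bilinear',
#                 align_corners=False)                # -> upsample_bilinear2d.vec
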
# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility.
- func: upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_linear1d_out_cpu
    CUDA: upsample_linear1d_out_cuda

- func: upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_linear1d.out

- func: upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_linear1d_backward_out_cpu
    CUDA: upsample_linear1d_backward_out_cuda

- func: upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_linear1d_backward.grad_input

- func: upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bilinear2d_out_cpu
    CUDA: upsample_bilinear2d_out_cuda
    MPS: upsample_bilinear2d_out_mps

- func: upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bilinear2d.out
  dispatch:
    QuantizedCPU: upsample_bilinear2d_quantized_cpu

- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bilinear2d_backward_out_cpu
    CUDA: upsample_bilinear2d_backward_out_cuda
    MPS: upsample_bilinear2d_backward_out_mps

- func: upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bilinear2d_backward.grad_input

- func: _upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bilinear2d_aa_out_cpu
    CUDA: _upsample_bilinear2d_aa_out_cuda

- func: _upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bilinear2d_aa.out

- func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bilinear2d_aa_backward_out_cpu
    CUDA: _upsample_bilinear2d_aa_backward_out_cuda

- func: _upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bilinear2d_aa_backward.grad_input

- func: upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bicubic2d_out_cpu
    CUDA: upsample_bicubic2d_out_cuda

- func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bicubic2d.out

- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_bicubic2d_backward_out_cpu
    CUDA: upsample_bicubic2d_backward_out_cuda

- func: upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_bicubic2d_backward.grad_input

- func: _upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bicubic2d_aa_out_cpu
    CUDA: _upsample_bicubic2d_aa_out_cuda

- func: _upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bicubic2d_aa.out

- func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_bicubic2d_aa_backward_out_cpu
    CUDA: _upsample_bicubic2d_aa_backward_out_cuda

- func: _upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_bicubic2d_aa_backward.grad_input

- func: upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_trilinear3d_out_cpu
    CUDA: upsample_trilinear3d_out_cuda

- func: upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_trilinear3d.out

- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_trilinear3d_backward_out_cpu
    CUDA: upsample_trilinear3d_backward_out_cuda

- func: upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_trilinear3d_backward.grad_input

- func: upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest1d_out_cpu
    CUDA: upsample_nearest1d_out_cuda
    MPS: upsample_nearest1d_out_mps

- func: _upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact1d_out_cpu
    CUDA: _upsample_nearest_exact1d_out_cuda
    MPS: _upsample_nearest_exact1d_out_mps

- func: upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest1d.out

- func: _upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact1d.out

- func: upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest1d_backward_out_cpu
    CUDA: upsample_nearest1d_backward_out_cuda
    MPS: upsample_nearest1d_backward_out_mps

- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact1d_backward_out_cpu
    CUDA: _upsample_nearest_exact1d_backward_out_cuda
    MPS: _upsample_nearest_exact1d_backward_out_mps

- func: upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest1d_backward.grad_input

- func: _upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact1d_backward.grad_input

- func: upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest2d_out_cpu
    CUDA: upsample_nearest2d_out_cuda
    MPS: upsample_nearest2d_out_mps

- func: _upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact2d_out_cpu
    CUDA: _upsample_nearest_exact2d_out_cuda
    MPS: _upsample_nearest_exact2d_out_mps

- func: upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest2d.out
  dispatch:
    QuantizedCPU: upsample_nearest2d_quantized_cpu

- func: _upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact2d.out
  dispatch:
    QuantizedCPU: _upsample_nearest_exact2d_quantized_cpu

- func: upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest2d_backward_out_cpu
    CUDA: upsample_nearest2d_backward_out_cuda
    MPS: upsample_nearest2d_backward_out_mps

- func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact2d_backward_out_cpu
    CUDA: _upsample_nearest_exact2d_backward_out_cuda
    MPS: _upsample_nearest_exact2d_backward_out_mps

- func: upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest2d_backward.grad_input

- func: _upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact2d_backward.grad_input

- func: upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest3d_out_cpu
    CUDA: upsample_nearest3d_out_cuda

- func: _upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact3d_out_cpu
    CUDA: _upsample_nearest_exact3d_out_cuda

- func: upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest3d.out
  dispatch:
    QuantizedCPU: upsample_nearest3d_quantized_cpu

- func: _upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact3d.out
  dispatch:
    QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu

- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: upsample_nearest3d_backward_out_cpu
    CUDA: upsample_nearest3d_backward_out_cuda

- func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: _upsample_nearest_exact3d_backward_out_cpu
    CUDA: _upsample_nearest_exact3d_backward_out_cuda

- func: upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: upsample_nearest3d_backward.grad_input

- func: _upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
  structured_delegate: _upsample_nearest_exact3d_backward.grad_input

- func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: sigmoid_backward_out
    MPS: sigmoid_backward_out_mps
  tags: pointwise

- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
  python_module: nn
  structured_delegate: sigmoid_backward.grad_input
  tags: pointwise

- func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: logit_backward_out
  tags: pointwise

- func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
  python_module: nn
  structured_delegate: logit_backward.grad_input
  tags: pointwise

- func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
  10010. python_module: nn
  10011. structured: True
  10012. structured_inherits: TensorIteratorBase
  10013. dispatch:
  10014. CPU, CUDA: tanh_backward_out
  10015. MPS: tanh_backward_out_mps
  10016. tags: pointwise
  10017. - func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor
  10018. python_module: nn
  10019. structured_delegate: tanh_backward.grad_input

# What's a thnn_conv_ versus a slow_conv_?
#
# Historically, we have inefficient implementations of convolutions
# coming from the THNN/THCUNN library. These convolutions typically
# operated by computing the Toeplitz matrix and then doing a matrix
# multiply with the input; this is very memory inefficient! However,
# occasionally, we really don't have anything better, so it's helpful
# to have these fallbacks when there is no more optimized implementation
# in cudnn or mkldnn, etc. Both thnn_ and slow_ convolutions fall
# into this bucket.
#
# The difference between these two designations is that thnn_ refers
# to a convolution that is still written in the "legacy" style; that is,
# C code in the THNN/ or THCUNN/ directory. A slow_ convolution is
# one that is written in the native style: modern C++. Algorithmically,
# these are the same thing, but we give them different prefixes to
# make the operational distinction clear.
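#
# As a rough, hedged illustration of the Toeplitz/matrix-multiply strategy
# described above (a PyTorch-level sketch, not the actual kernel code), an
# im2col-style "slow" convolution can be reproduced like this:
#
#   import torch
#   import torch.nn.functional as F
#
#   x = torch.randn(1, 3, 8, 8)                 # NCHW input
#   w = torch.randn(4, 3, 3, 3)                 # out_ch x in_ch x kH x kW
#   cols = F.unfold(x, kernel_size=3)           # (1, 27, 36) patch matrix
#   out = w.view(4, -1) @ cols                  # one large matmul
#   out = out.view(1, 4, 6, 6)                  # back to spatial layout
#   assert torch.allclose(out, F.conv2d(x, w), atol=1e-5)
#
# The patch matrix `cols` is what makes this memory-inefficient: every input
# element is duplicated once per kernel position that touches it.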

- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  structured: True
  dispatch:
    CPU: slow_conv_transpose2d_structured_cpu
    CUDA: slow_conv_transpose2d_structured_cuda

- func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
  python_module: nn
  structured_delegate: slow_conv_transpose2d.out

- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_out_cpu
    CUDA: slow_conv_transpose3d_out_cuda

- func: slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_transpose3d_cpu
    CUDA: slow_conv_transpose3d_cuda

- func: thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
  python_module: nn

- func: _slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv2d_forward_out_cpu
    CUDA: slow_conv2d_forward_out_cuda

- func: _slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv2d_forward_cpu
    CUDA: slow_conv2d_forward_cuda

- func: _slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
  python_module: nn
  dispatch:
    CPU: slow_conv2d_backward_out_cpu
    CUDA: slow_conv2d_backward_out_cuda

- func: _slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
  python_module: nn
  dispatch:
    CPU: slow_conv2d_backward_cpu
    CUDA: slow_conv2d_backward_cuda
  autogen: _slow_conv2d_backward.output_mask_out

- func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: nn
  dispatch:
    CUDA: conv_depthwise2d_cuda_out

- func: _conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
  python_module: nn
  dispatch:
    CUDA: conv_depthwise2d_cuda

- func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
  python_module: nn
  dispatch:
    CUDA: conv_depthwise3d_cuda
  autogen: conv_depthwise3d.out

- func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn

- func: slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
  python_module: nn

- func: slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: slow_conv3d_forward_out_cpu

- func: slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv3d_forward_cpu

- func: slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated2d_cpu
    CUDA: slow_conv_dilated2d_cuda
  autogen: slow_conv_dilated2d.out

- func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
  python_module: nn
  dispatch:
    CPU: slow_conv_dilated3d_cpu
    CUDA: slow_conv_dilated3d_cuda
  autogen: slow_conv_dilated3d.out

- func: col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: col2im_out_cpu
    CUDA: col2im_out_cuda

- func: col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  python_module: nn
  dispatch:
    CPU: col2im_cpu
    CUDA: col2im_cuda
  tags: core

- func: column_stack(Tensor[] tensors) -> Tensor

- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)

- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  dispatch:
    CPU: im2col_out_cpu
    CUDA: im2col_out_cuda

- func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  python_module: nn
  dispatch:
    CPU: im2col_cpu
    CUDA: im2col_cuda

- func: isfinite(Tensor self) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False

- func: isinf(Tensor self) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
  dispatch:
    CompositeExplicitAutograd: isinf
    SparseCPU, SparseCUDA: isinf_sparse
    SparseMeta: isinf_sparse_meta
    SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr
  autogen: isinf.out
  tags: core

- func: record_stream(Tensor(a!) self, Stream s) -> ()
  variants: method
  dispatch:
    CUDA: record_stream_cuda

- func: isposinf(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: isposinf.out
  dispatch:
    SparseCPU, SparseCUDA: isposinf_sparse
    SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr
  tags: pointwise

- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: isposinf_out
    SparseCPU, SparseCUDA: isposinf_sparse_out
    SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr_out
  tags: pointwise

- func: isneginf(Tensor self) -> Tensor
  variants: function, method
  structured_delegate: isneginf.out
  dispatch:
    SparseCPU, SparseCUDA: isneginf_sparse
    SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr
  tags: pointwise

- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: isneginf_out
    SparseCPU, SparseCUDA: isneginf_sparse_out
    SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr_out
  tags: pointwise

# NOTE [_add_batch_dim and _remove_batch_dim]
# _add_batch_dim and _remove_batch_dim are meant to be used in the implementation
# of the vmap frontend API (see torch/_vmap_internals.py). They are not
# user-facing, hence the leading underscore. Please don't use them anywhere
# else; a usage sketch follows the two declarations below.
- func: _add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
  variants: function

# See NOTE [_add_batch_dim and _remove_batch_dim]
- func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
  variants: function
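
# As context only (a hedged sketch of the public behavior, not of these
# internal helpers), vmap conceptually tags each input with a batch dim,
# runs the function once under that tag, and untags the result:
#
#   import torch
#
#   xs = torch.randn(8, 3)
#   ys = torch.vmap(lambda x: x.sum())(xs)     # batch dim handled internally
#   assert torch.allclose(ys, xs.sum(dim=1))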

## Functions related to the `torch.special` namespace
# Note [special namespace binding]
# Functions in the special python module should have their names start with
# the "special_" prefix and be bound to the desired Python name in
# torch/special/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/special.h.
# The "special_" names should be hidden from the user and not documented.
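#
# For example (an illustrative usage only), special_entr below is what users
# reach as torch.special.entr:
#
#   import torch
#
#   x = torch.tensor([0.5])
#   y = torch.special.entr(x)                  # elementwise -x * log(x)
#   assert torch.allclose(y, -x * x.log())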

- func: special_entr(Tensor self) -> Tensor
  structured_delegate: special_entr.out
  python_module: special
  variants: function
  tags: pointwise

- func: special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_entr_out
  tags: pointwise

- func: special_ndtri(Tensor self) -> Tensor
  structured_delegate: special_ndtri.out
  python_module: special
  variants: function
  tags: pointwise

- func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_ndtri_out
  tags: pointwise

- func: special_log_ndtr(Tensor self) -> Tensor
  structured_delegate: special_log_ndtr.out
  python_module: special
  variants: function
  tags: pointwise

- func: special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_log_ndtr_out
  tags: pointwise

- func: special_expm1(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_exp2(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_psi(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_digamma(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_gammaln(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_erf(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_erfc(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special

- func: special_erfcx(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_erfcx.out
  tags: pointwise

- func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_erfcx_out
  tags: pointwise

- func: special_erfinv(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special

- func: special_ndtr(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_xlog1py(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  structured_delegate: special_xlog1py.out
  tags: pointwise

- func: special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py
  tags: pointwise

- func: special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py
  tags: pointwise

- func: special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_xlog1py_out
  tags: pointwise

- func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py_out
  tags: pointwise

- func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_xlog1py_out
  tags: pointwise

- func: special_xlogy(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function

- func: special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function

- func: special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function

- func: special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function

- func: special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function

- func: special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function

- func: special_zeta(Tensor self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  structured_delegate: special_zeta.out
  tags: pointwise

- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta
  tags: pointwise

- func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta
  tags: pointwise

- func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  structured: True
  structured_inherits: TensorIteratorBase
  python_module: special
  variants: function
  dispatch:
    CPU, CUDA: special_zeta_out
  tags: pointwise

- func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta_out
  tags: pointwise

- func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
  python_module: special
  variants: function
  dispatch:
    CompositeExplicitAutograd: special_zeta_out
  tags: pointwise

- func: special_i0(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_i0e(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_i0e.out
  tags: pointwise

- func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_i0e_out
  tags: pointwise

- func: special_i1(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_i1.out
  tags: pointwise

- func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_i1_out
  tags: pointwise

- func: special_i1e(Tensor self) -> Tensor
  python_module: special
  variants: function
  structured_delegate: special_i1e.out
  tags: pointwise

- func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
    CPU, CUDA: special_i1e_out
  tags: pointwise

- func: special_logit(Tensor self, float? eps=None) -> Tensor
  python_module: special
  variants: function

- func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special

- func: special_polygamma(int n, Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special

- func: special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
  python_module: special
  variants: function

- func: special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special

- func: special_expit(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_sinc(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_round(Tensor self, *, int decimals=0) -> Tensor
  python_module: special
  variants: function

- func: special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_log1p(Tensor self) -> Tensor
  python_module: special
  variants: function

- func: special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
  python_module: special
  variants: function

- func: special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_gammainc(Tensor self, Tensor other) -> Tensor
  python_module: special
  variants: function

- func: special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_gammaincc(Tensor self, Tensor other) -> Tensor
  python_module: special
  variants: function

- func: special_multigammaln(Tensor self, int p) -> Tensor
  python_module: special
  variants: function

- func: special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
  python_module: special
  variants: function

- func: special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  python_module: special
  variants: function

## Functions related to the fast Fourier transform and the torch.fft namespace
# Note [FFT namespace binding]
# Functions in the fft python module should have their names start with
# the "fft_" prefix and be bound to the desired Python name in
# torch/fft/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/fft.h.
# The "fft_" names should be hidden from the user and not documented.
#
# See fft_fft as an example.

# torch.fft.fft
# NOTE: NOT an alias for torch.fft, which has different semantics
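#
# For example (an illustrative usage only), fft_fft below is what users reach
# as torch.fft.fft, with torch.fft.ifft as its inverse:
#
#   import torch
#
#   x = torch.randn(16)
#   X = torch.fft.fft(x)                       # complex-valued spectrum
#   assert torch.allclose(torch.fft.ifft(X).real, x, atol=1e-6)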
- func: fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function

- func: fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
  use_const_ref_for_mutable_tensors: True
  python_module: fft
  variants: function

- func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_fftfreq

- func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_fftfreq_out

- func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_rfftfreq

- func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
  python_module: fft
  variants: function
  dispatch:
    CompositeExplicitAutograd: fft_rfftfreq_out

- func: fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
  python_module: fft
  variants: function

- func: fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
  python_module: fft
  variants: function

## Functions for linear algebra and the torch.linalg namespace
# Note [linalg namespace binding]
# Functions in the linalg python module should have their names start with
# "linalg_" and be bound to the desired Python name in
# torch/linalg/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/linalg.h.
# The "linalg_" names should be hidden from the user and not documented.
#
# See linalg_det as an example.

# "_ex" stands for experimental
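#
# For example (an illustrative usage only), the "_ex" variant below reports
# failures through an `info` tensor instead of raising, and the non-"_ex"
# torch.linalg.cholesky wraps it:
#
#   import torch
#
#   A = torch.eye(3)
#   L, info = torch.linalg.cholesky_ex(A)
#   assert info.item() == 0                    # 0 means success
#   assert torch.allclose(L, torch.linalg.cholesky(A))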
- func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
  python_module: linalg
  structured_delegate: linalg_cholesky_ex.L

- func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_cholesky_ex_out

- func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
  python_module: linalg

- func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

- func: linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
  python_module: linalg
  variants: function
  structured_delegate: linalg_cross.out
  dispatch:
    ZeroTensor: linalg_cross_zerotensor

- func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA, MPS: linalg_cross_out

# linalg.lu_factor
- func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
  python_module: linalg
  variants: function

- func: linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
  python_module: linalg
  variants: function

- func: linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
  python_module: linalg
  structured_delegate: linalg_lu_factor_ex.out
  variants: function

- func: linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
  python_module: linalg
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: linalg_lu_factor_ex_out

# linalg.lu
- func: linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
  python_module: linalg
  structured_delegate: linalg_lu.out
  variants: function

- func: linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
  python_module: linalg
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: linalg_lu_out

# linalg.lu_solve
- func: linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
  python_module: linalg
  structured_delegate: linalg_lu_solve.out
  variants: function

- func: linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
  structured: True
  dispatch:
    CPU, CUDA: linalg_lu_solve_out

# linalg.det
- func: _linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
  structured_delegate: _linalg_det.result

- func: _linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_det_out

- func: linalg_det(Tensor A) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

# torch.det, alias for torch.linalg.det
- func: det(Tensor self) -> Tensor
  variants: function, method

- func: linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
  structured_delegate: linalg_ldl_factor_ex.out
  python_module: linalg
  variants: function

- func: linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
  structured: True
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_ldl_factor_ex_out

- func: linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
  python_module: linalg
  variants: function

- func: linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
  python_module: linalg
  variants: function

- func: linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
  structured_delegate: linalg_ldl_solve.out
  python_module: linalg
  variants: function

- func: linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  structured: True
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_ldl_solve_out

- func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
  python_module: linalg
  variants: function
  dispatch:
    CompositeExplicitAutograd: linalg_lstsq
  tags: dynamic_output_shape

- func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_lstsq_out
  tags: dynamic_output_shape

# torch.linalg.matmul, alias for torch.matmul
- func: linalg_matmul(Tensor self, Tensor other) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

- func: linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

- func: linalg_matrix_exp(Tensor self) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_matrix_exp
  autogen: linalg_matrix_exp.out

- func: _linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
  structured_delegate: _linalg_slogdet.sign

- func: _linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_slogdet_out

- func: linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
  python_module: linalg

- func: linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
  python_module: linalg

- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
  variants: function, method

- func: slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
  variants: function

- func: logdet(Tensor self) -> Tensor
  variants: function, method

- func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_eig

- func: linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
  python_module: linalg
  dispatch:
    CPU, CUDA: linalg_eig_out

- func: linalg_eigvals(Tensor self) -> Tensor
  python_module: linalg

- func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

# This function exposes the `compute_v` flag, which is then used to implement
# `linalg.eigh` and `linalg.eigvalsh` as composite functions that call this
# one (see the sketch after the two declarations below).
- func: _linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
  structured_delegate: _linalg_eigh.eigenvalues

- func: _linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_eigh_out
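
# A hedged sketch of that composition at the Python level (not the actual
# C++ implementation):
#
#   import torch
#
#   A = torch.randn(4, 4)
#   A = A + A.mT                               # make it symmetric
#   w, V = torch.linalg.eigh(A)                # compute_v=True path
#   w2 = torch.linalg.eigvalsh(A)              # compute_v=False path
#   assert torch.allclose(w, w2)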

- func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
  python_module: linalg

- func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
  python_module: linalg

- func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
  python_module: linalg

- func: linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

- func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor
  python_module: linalg
  variants: function
  dispatch:
    CPU, CUDA: linalg_householder_product

- func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  dispatch:
    CPU, CUDA: linalg_householder_product_out

- func: linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
  python_module: linalg
  structured_delegate: linalg_inv_ex.inverse

- func: linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_inv_ex_out
    MPS: linalg_inv_ex_out_mps

- func: linalg_inv(Tensor A) -> Tensor
  python_module: linalg

- func: linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

- func: inverse(Tensor self) -> Tensor
  variants: function, method

- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

- func: inner(Tensor self, Tensor other) -> Tensor
  variants: function, method

- func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

- func: outer(Tensor self, Tensor vec2) -> Tensor
  variants: function, method

- func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)

# torch.ger, alias for torch.outer
- func: ger(Tensor self, Tensor vec2) -> Tensor
  variants: function, method

- func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)

- func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function

- func: linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function

- func: linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg
  variants: function
  structured_delegate: linalg_vector_norm.out

- func: linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  structured: True
  dispatch:
    CPU, CUDA: linalg_vector_norm_out

- func: linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg

- func: linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

- func: linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  python_module: linalg

- func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg

# This function exposes the `compute_uv` flag, which is then used to implement
# `linalg.svd` and `linalg.svdvals` as composite functions that call this one
# (see the sketch after the two declarations below).
- func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
  variants: function
  structured_delegate: _linalg_svd.U

- func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
  structured: True
  dispatch:
    CPU, CUDA: _linalg_svd_out
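
# A hedged sketch of that composition at the Python level (not the actual
# C++ implementation):
#
#   import torch
#
#   A = torch.randn(5, 3)
#   U, S, Vh = torch.linalg.svd(A, full_matrices=False)   # compute_uv=True path
#   S2 = torch.linalg.svdvals(A)                          # compute_uv=False path
#   assert torch.allclose(S, S2, atol=1e-6)
#   assert torch.allclose(U @ torch.diag(S) @ Vh, A, atol=1e-5)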
  10936. - func: linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
  10937. python_module: linalg
  10938. variants: function
  10939. - func: linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
  10940. python_module: linalg
  10941. variants: function
  10942. - func: linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
  10943. python_module: linalg
  10944. variants: function
  10945. - func: linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
  10946. python_module: linalg
  10947. variants: function
  10948. - func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor
  10949. python_module: linalg
  10950. variants: function
  10951. - func: linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
  10952. python_module: linalg
  10953. variants: function
  10954. - func: linalg_cond.p_str(Tensor self, str p) -> Tensor
  10955. python_module: linalg
  10956. variants: function
  10957. - func: linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
  10958. python_module: linalg
  10959. variants: function
  10960. - func: linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
  10961. python_module: linalg
  10962. variants: function
  10963. dispatch:
  10964. # calls svd, which calls mH() (view op)
  10965. # also calls narrow()
  10966. CompositeExplicitAutogradNonFunctional: linalg_pinv
  10967. - func: linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  10968. python_module: linalg
  10969. variants: function
  10970. dispatch:
  10971. CompositeExplicitAutograd: linalg_pinv_out
  10972. - func: linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
  10973. cpp_no_default_args: ['atol', 'rtol']
  10974. python_module: linalg
  10975. variants: function
  10976. - func: linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  10977. cpp_no_default_args: ['atol', 'rtol']
  10978. python_module: linalg
  10979. variants: function
  10980. - func: linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
  10981. python_module: linalg
  10982. variants: function
  10983. - func: linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
  10984. python_module: linalg
  10985. variants: function
  10986. - func: linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  10987. python_module: linalg
  10988. variants: function
  10989. - func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  10990. python_module: linalg
  10991. variants: function
  10992. - func: _linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
  10993. structured_delegate: _linalg_solve_ex.result
  10994. - func: _linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
  10995. structured: True
  10996. dispatch:
  10997. CPU, CUDA: _linalg_solve_ex_out
  10998. - func: linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
  10999. python_module: linalg
  11000. - func: linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
  11001. python_module: linalg
  11002. - func: linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
  11003. python_module: linalg
  11004. - func: linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
  11005. python_module: linalg
  11006. - func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor
  11007. python_module: linalg
  11008. variants: function
  11009. - func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
  11010. python_module: linalg
  11011. variants: function
  11012. - func: linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
  11013. python_module: linalg
  11014. variants: function
  11015. - func: linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
  11016. python_module: linalg
  11017. variants: function
  11018. - func: linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
  11019. python_module: linalg
  11020. variants: function
  11021. structured_delegate: linalg_qr.out
  11022. - func: linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
  11023. python_module: linalg
  11024. structured: True
  11025. dispatch:
  11026. CPU, CUDA: linalg_qr_out

- func: linalg_matrix_power(Tensor self, int n) -> Tensor
  python_module: linalg

- func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
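
# A usage sketch, assuming the torch.linalg.matrix_power binding; negative
# exponents require an invertible input.
#
#   >>> import torch
#   >>> A = torch.eye(3) * 2
#   >>> torch.linalg.matrix_power(A, 3)[0, 0]   # 2**3
#   tensor(8.)
#   >>> torch.linalg.matrix_power(A, -1)[0, 0]  # inverse
#   tensor(0.5000)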

- func: linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function

- func: linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
  cpp_no_default_args: ['atol', 'rtol']
  python_module: linalg
  variants: function

- func: linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
  cpp_no_default_args: ['atol', 'rtol']
  python_module: linalg
  variants: function

- func: linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function

- func: linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
  python_module: linalg
  variants: function

- func: linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
  variants: function
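
# A usage sketch, assuming the torch.linalg.matrix_rank binding with the
# atol/rtol overloads declared above.
#
#   >>> import torch
#   >>> A = torch.diag(torch.tensor([1., 2., 3., 0.]))   # rank-deficient
#   >>> torch.linalg.matrix_rank(A)
#   tensor(3)
#   >>> torch.linalg.matrix_rank(A, atol=2.5, rtol=0.0)  # singular values > 2.5
#   tensor(1)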

- func: linalg_multi_dot(Tensor[] tensors) -> Tensor
  python_module: linalg

- func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
  python_module: linalg
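
# A usage sketch, assuming the torch.linalg.multi_dot binding, which chooses
# the cheapest multiplication order for a chain of matrices.
#
#   >>> import torch
#   >>> A, B, C = torch.randn(10, 100), torch.randn(100, 5), torch.randn(5, 50)
#   >>> out = torch.linalg.multi_dot([A, B, C])
#   >>> out.shape
#   torch.Size([10, 50])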

## Functions related to the `torch.nested` namespace
# Note [nested namespace binding]
# Functions in the nested python module should have their names start with
# "nested_" and be bound to the desired Python name in
# torch/nested/__init__.py, and to the desired C++ name in
# torch/csrc/api/include/torch/nested.h.
# The "nested_" names should be hidden from the user and not documented.

- func: nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
  python_module: nested
  variants: function
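
# A usage sketch, assuming this op is re-exported as
# torch.nested.to_padded_tensor per the binding note above.
#
#   >>> import torch
#   >>> nt = torch.nested.nested_tensor([torch.ones(2, 5), torch.ones(3, 5)])
#   >>> padded = torch.nested.to_padded_tensor(nt, 0.0)
#   >>> padded.shape  # ragged dim padded up to the longest entry
#   torch.Size([2, 3, 5])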

## Functions that are only for testing
# These are undocumented and should not be used outside of tests.

- func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor

# Note: this function is only for testing.
- func: _test_optional_intlist(Tensor values, int[]? addends) -> Tensor
  python_module: nn
  dispatch:
    CPU: _test_optional_intlist
  autogen: _test_optional_intlist.out

# Note: this function is only for testing.
- func: _test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
  python_module: nn
  dispatch:
    CPU: _test_optional_intlist
  autogen: _test_optional_filled_intlist.out

# Note: this function is only for testing.
- func: _test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
  python_module: nn
  dispatch:
    CPU: _test_optional_floatlist
  autogen: _test_optional_floatlist.out

# Note: this function is only for testing.
- func: _test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
  python_module: nn

# Note: this function is only for testing.
- func: _test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
  python_module: nn

# Note: this function is only for testing.
- func: _test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
  cpp_no_default_args: ['a', 'b']
  python_module: nn

# Note: this function is only for testing.
- func: _test_warn_in_autograd(Tensor self) -> Tensor
  python_module: nn
  dispatch:
    CompositeExplicitAutograd: _test_warn_in_autograd
  autogen: _test_warn_in_autograd.out

# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
  dispatch:
    # the NestedTensor keys are necessary because NestedTensor has been removed
    # from the CompositeExplicitAutograd keyset; see Note [NestedTensor Not Included in Backend Keys]
    CompositeExplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_fullcoverage
  autogen: _test_autograd_multiple_dispatch.fullcoverage_out

# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
  dispatch:
    CompositeImplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_ntonly

# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
  dispatch:
    CompositeExplicitAutograd: _test_autograd_multiple_dispatch_view

# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _test_autograd_multiple_dispatch_view_copy
  tags: view_copy
  autogen: _test_autograd_multiple_dispatch_view_copy.out

- func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: segment_reduce_kernel
  autogen: segment_reduce.out
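
# A usage sketch; the exact Python exposure of this op has varied across
# releases, so treat the torch.segment_reduce spelling below as an assumption.
#
#   >>> import torch
#   >>> data = torch.tensor([1., 2., 3., 4., 5.])
#   >>> lengths = torch.tensor([2, 3])        # two segments: [1,2] and [3,4,5]
#   >>> torch.segment_reduce(data, "max", lengths=lengths)
#   tensor([2., 5.])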

- func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA: _segment_reduce_backward_kernel
  autogen: _segment_reduce_backward.out

- func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
  python_module: nn
  variants: function
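
# A usage sketch, assuming the familiar torch.nn.utils.rnn.pad_sequence
# wrapper over this op.
#
#   >>> import torch
#   >>> from torch.nn.utils.rnn import pad_sequence
#   >>> a, b = torch.ones(4), torch.ones(2)
#   >>> pad_sequence([a, b], batch_first=True, padding_value=0.0)
#   tensor([[1., 1., 1., 1.],
#           [1., 1., 0., 0.]])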

- func: flatten_dense_tensors(Tensor[] tensors) -> Tensor
  variants: function
  python_module: nn

- func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
  variants: function
  python_module: nn

- func: _nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _nested_tensor_from_tensor_list
  autogen: _nested_tensor_from_tensor_list.out

- func: _fw_primal_copy(Tensor self, int level) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _fw_primal_copy
  tags: view_copy
  autogen: _fw_primal_copy.out

- func: _make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _make_dual_copy
  tags: view_copy
  autogen: _make_dual_copy.out

- func: view_as_real_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_as_real_copy
  tags: view_copy
  autogen: view_as_real_copy.out

- func: view_as_complex_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_as_complex_copy
  tags: view_copy
  autogen: view_as_complex_copy.out

- func: _conj_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _conj_copy
  tags: view_copy
  autogen: _conj_copy.out

- func: _neg_view_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _neg_view_copy
  tags: view_copy
  autogen: _neg_view_copy.out

- func: as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: as_strided_copy_symint
  tags: view_copy
  autogen: as_strided_copy.out

- func: _sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _sparse_broadcast_to_copy
  tags: view_copy
  autogen: _sparse_broadcast_to_copy.out

- func: diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: diagonal_copy
  tags: view_copy
  autogen: diagonal_copy.out
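
# A sketch of the view-vs-copy contract shared by the *_copy ops tagged
# view_copy in this block, assuming they are exposed under the top-level
# torch namespace.
#
#   >>> import torch
#   >>> t = torch.zeros(3, 3)
#   >>> d_view = torch.diagonal(t)       # aliases t's storage
#   >>> d_copy = torch.diagonal_copy(t)  # owns its own storage
#   >>> t[0, 0] = 1.
#   >>> d_view[0], d_copy[0]
#   (tensor(1.), tensor(0.))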

- func: expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: expand_copy_symint
  tags: view_copy
  autogen: expand_copy.out

- func: permute_copy(Tensor self, int[] dims) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: permute_copy
  tags: view_copy
  autogen: permute_copy.out

- func: _reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _reshape_alias_copy_symint
  tags: view_copy
  autogen: _reshape_alias_copy.out

- func: select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: select_copy_symint
    SparseCsrCPU, SparseCsrCUDA: select_copy_sparse_csr
  tags: view_copy
  autogen: select_copy.int_out

- func: detach_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: detach_copy
  tags: view_copy
  autogen: detach_copy.out

- func: slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: slice_copy_Tensor_symint
  tags: view_copy
  autogen: slice_copy.Tensor_out

- func: split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: split_copy_Tensor_symint
  tags: view_copy

- func: split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: split_with_sizes_copy_symint
  tags: view_copy

- func: squeeze_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: squeeze_copy
  tags: view_copy
  autogen: squeeze_copy.out

- func: squeeze_copy.dim(Tensor self, int dim) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: squeeze_copy_dim
  tags: view_copy
  autogen: squeeze_copy.dim_out

- func: squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: squeeze_copy_dims
  tags: view_copy
  autogen: squeeze_copy.dims_out

- func: t_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: t_copy
  tags: view_copy
  autogen: t_copy.out

- func: transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: transpose_copy_int
  tags: view_copy
  autogen: transpose_copy.int_out

- func: unsqueeze_copy(Tensor self, int dim) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: unsqueeze_copy
  tags: view_copy
  autogen: unsqueeze_copy.out

- func: _indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _indices_copy
  tags: view_copy
  autogen: _indices_copy.out

- func: _values_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: _values_copy
  tags: view_copy
  autogen: _values_copy.out

- func: indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: indices_copy
  tags: view_copy
  autogen: indices_copy.out

- func: values_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: values_copy
  tags: view_copy
  autogen: values_copy.out

- func: crow_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: crow_indices_copy
  tags: view_copy
  autogen: crow_indices_copy.out

- func: col_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: col_indices_copy
  tags: view_copy
  autogen: col_indices_copy.out

- func: ccol_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: ccol_indices_copy
  tags: view_copy
  autogen: ccol_indices_copy.out

- func: row_indices_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: row_indices_copy
  tags: view_copy
  autogen: row_indices_copy.out

- func: unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: unbind_copy_int
  tags: view_copy

- func: unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
  variants: function
  dispatch:
    CompositeExplicitAutograd: unbind_copy_int_out

- func: split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
  variants: function
  dispatch:
    CompositeExplicitAutograd: split_copy_Tensor_out

- func: split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
  variants: function
  dispatch:
    CompositeExplicitAutograd: split_with_sizes_copy_out

- func: view_copy(Tensor self, SymInt[] size) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_copy_symint
  tags: view_copy
  autogen: view_copy.out

- func: view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: view_copy_dtype
  tags: view_copy
  autogen: view_copy.dtype_out

- func: unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: unfold_copy
  tags: view_copy
  autogen: unfold_copy.out

- func: alias_copy(Tensor self) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutogradNonFunctional: alias_copy
  tags: view_copy
  autogen: alias_copy.out

- func: to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
  variants: method
  dispatch:
    NestedTensorCPU: NestedTensor_to_padded_tensor_generic
    NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda
  autogen: to_padded_tensor.out

- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
  dispatch:
    NestedTensorCPU: NestedTensor_softmax_dropout
    NestedTensorCUDA: NestedTensor_softmax_dropout_cuda

# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is.
- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
  variants: function
  dispatch:
    CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward
  autogen: _transformer_encoder_layer_fwd.out

- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU, NestedTensorCPU: native_multi_head_attention_cpu
    CUDA, NestedTensorCUDA: native_multi_head_attention_cuda
  autogen: _native_multi_head_attention.out

- func: scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor
  python_module: nn
  variants: function
  autogen: scaled_dot_product_attention.out
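
# A usage sketch via torch.nn.functional (python_module: nn), with the
# conventional (batch, heads, seq_len, head_dim) layout.
#
#   >>> import torch
#   >>> import torch.nn.functional as F
#   >>> q = torch.randn(2, 8, 16, 64)
#   >>> k = torch.randn(2, 8, 16, 64)
#   >>> v = torch.randn(2, 8, 16, 64)
#   >>> out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
#   >>> out.shape
#   torch.Size([2, 8, 16, 64])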

# TODO: THIS NEEDS TO BE REMOVED BUT PEOPLE HAVE TRAINED THEIR MODELS WITH THIS OP BUILTIN
- func: _scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
  python_module: nn
  variants: function
  autogen: _scaled_dot_product_attention.out

# This aten function is kept so that we can test the choice function from Python
- func: _fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
  dispatch:
    Meta: _fused_sdp_choice_meta
    CPU, NestedTensorCPU: _fused_sdp_choice_cpp
    CUDA, NestedTensorCUDA: _fused_sdp_choice_cuda

- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
  variants: function

- func: _scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
  dispatch:
    CUDA: _scaled_dot_product_flash_attention_cuda
    NestedTensorCUDA: _scaled_dot_product_flash_attention_nestedtensor_cuda

- func: _scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
  variants: function
  dispatch:
    CUDA: _scaled_dot_product_flash_attention_backward_cuda

- func: _scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
  dispatch:
    CUDA: _scaled_dot_product_efficient_attention_cuda
    NestedTensorCUDA: _scaled_dot_product_efficient_attention_nestedtensor_cuda

- func: _scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
  dispatch:
    CUDA: _scaled_dot_product_efficient_attention_backward_cuda

- func: _chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
  dispatch:
    CUDA: _chunk_grad_outputs_efficient_attention

- func: _flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
  variants: function
  dispatch:
    CUDA: _flash_attention_forward

- func: _flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: _flash_attention_backward

# Returns output, and logsumexp if compute_log_sumexp is set
- func: _efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: _efficient_attention_forward

- func: _efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: _efficient_attention_backward

- func: _triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
  variants: function
  dispatch:
    CUDA: triton_scaled_dot_attention
  autogen: _triton_scaled_dot_attention.out

- func: _triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
  variants: function
  dispatch:
    CUDA: triton_multi_head_attention
  autogen: _triton_multi_head_attention.out

- func: special_airy_ai(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_airy_ai.out
  variants: function
  tags: pointwise

- func: special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_airy_ai_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: _transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_decoder_only_layer_forward
  autogen: _transformer_decoder_only_layer_fwd.out

- func: _native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)
  variants: function
  dispatch:
    CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: native_decoder_only_multi_head_attention
  autogen: _native_decoder_only_multi_head_attention.out

- func: special_bessel_j0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_j0.out
  variants: function
  tags: pointwise

- func: special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_j0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise
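
# A usage sketch for the Bessel-family pointwise ops, assuming the
# torch.special bindings (where the "special_" prefix is dropped).
#
#   >>> import torch
#   >>> torch.special.bessel_j0(torch.tensor([0.0, 1.0]))
#   tensor([1.0000, 0.7652])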

- func: special_bessel_j1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_j1.out
  variants: function
  tags: pointwise

- func: special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_j1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_bessel_y0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_y0.out
  variants: function
  tags: pointwise

- func: special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_y0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_bessel_y1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_bessel_y1.out
  variants: function
  tags: pointwise

- func: special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_bessel_y1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_t.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_t_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_t_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise
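
# A usage sketch for the orthogonal-polynomial families declared here and
# below, assuming the torch.special bindings; the n_scalar overload lets n be
# a plain Python number. Recall T_2(x) = 2*x**2 - 1.
#
#   >>> import torch
#   >>> x = torch.linspace(-1, 1, 5)
#   >>> torch.special.chebyshev_polynomial_t(x, 2)
#   tensor([ 1.0000, -0.5000, -1.0000, -0.5000,  1.0000])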

- func: special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_u.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_u_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_u_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_v.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_v_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_v_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_chebyshev_polynomial_w.out
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_chebyshev_polynomial_w_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_chebyshev_polynomial_w_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_hermite_polynomial_h.out
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_hermite_polynomial_h_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_hermite_polynomial_h_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_hermite_polynomial_he.out
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_hermite_polynomial_he_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_hermite_polynomial_he_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_laguerre_polynomial_l.out
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_laguerre_polynomial_l_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_laguerre_polynomial_l_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_legendre_polynomial_p.out
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_legendre_polynomial_p_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_legendre_polynomial_p_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_modified_bessel_i0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_i0.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_i0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_modified_bessel_i1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_i1.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_i1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_modified_bessel_k0(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_k0.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_k0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_modified_bessel_k1(Tensor self) -> Tensor
  python_module: special
  structured_delegate: special_modified_bessel_k1.out
  variants: function
  tags: pointwise

- func: special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_modified_bessel_k1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k0(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_scaled_modified_bessel_k0.out
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_scaled_modified_bessel_k0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k1(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_scaled_modified_bessel_k1.out
  variants: function
  tags: pointwise

- func: special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_scaled_modified_bessel_k1_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_t.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_t_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_u.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_u_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_v.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_v_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  structured_delegate: special_shifted_chebyshev_polynomial_w.out
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  dispatch:
    CPU, CUDA: special_shifted_chebyshev_polynomial_w_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out
  device_check: NoCheck
  python_module: special
  variants: function
  tags: pointwise

- func: special_spherical_bessel_j0(Tensor x) -> Tensor
  python_module: special
  structured_delegate: special_spherical_bessel_j0.out
  variants: function
  tags: pointwise

- func: special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
    CPU, CUDA: special_spherical_bessel_j0_out
  python_module: special
  structured_inherits: TensorIteratorBase
  structured: True
  variants: function
  tags: pointwise

# Aux function used in the test TestPythonDispatch.test_kwarg_only_and_positional_default
# within test/test_python_dispatch.py
- func: _foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
  dispatch:
    CPU: foobar
  autogen: _foobar.out

# Fused Optimizer CUDA kernels.
- func: _fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
  # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
  variants: function
  dispatch:
    CUDA: _fused_adam_kernel_cuda_
  autogen: _fused_adam, _fused_adam.out

- func: _fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
  # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
  variants: function
  dispatch:
    CUDA: _fused_adamw_kernel_cuda_
  autogen: _fused_adamw, _fused_adamw.out
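
# A usage sketch; the usual entry point into these kernels is the fused=True
# flag on torch.optim.Adam / AdamW (an assumption about the optimizer wrapper,
# not part of this schema), and it requires CUDA tensors.
#
#   >>> import torch
#   >>> model = torch.nn.Linear(8, 2).cuda()
#   >>> opt = torch.optim.Adam(model.parameters(), lr=1e-3, fused=True)
#   >>> loss = model(torch.randn(4, 8, device="cuda")).sum()
#   >>> loss.backward()
#   >>> opt.step()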