VmapGeneratedPlumbing.h 1.7 MB

[contents omitted: approximately 14,300 lines of generated code]
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
#pragma once
#include <ATen/Operators.h>
#include <ATen/functorch/PlumbingHelper.h>

namespace at { namespace functorch {
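
// Each plumbing wrapper below follows the same shape: exclude the
// FuncTorchBatched dispatch key, look up the current vmap dynamic layer, and
// check whether any Tensor argument is batched at that layer's level. If not,
// the call falls straight through to the underlying at::_ops operator.
// Otherwise the wrapper unwraps each batched Tensor into a (value, bdim) pair
// with unwrapTensorAtLevel, forwards everything to the supplied batch rule,
// and re-wraps the returned (value, bdim) pairs into batched tensors at the
// current level with makeBatched.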
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Byte::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
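// Usage sketch (illustrative only; the batch-rule name and signature below are
// assumptions, not part of this header): a batch rule matching the wrapper
// above would take each tensor together with its optional batch dimension and
// return the result plus its batch dimension, e.g.
//   std::tuple<at::Tensor, optional<int64_t>>
//   _cast_Byte_batch_rule(const at::Tensor& self, optional<int64_t> self_bdim,
//                         bool non_blocking);
// and the wrapper would be instantiated with it as
//   _cast_Byte_generated_plumbing<decltype(&_cast_Byte_batch_rule),
//                                 &_cast_Byte_batch_rule>.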
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Char::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Double::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Float::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Int::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Long::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Short::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Half::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
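// Operators that return nothing (checked with "gen_vmap_plumbing_no_returns")
// skip the re-wrapping step: the batch rule is invoked purely for its side
// effects. Optional Tensor arguments such as `gradient` below are unwrapped
// only when they actually hold a value.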
template <typename batch_rule_t, batch_rule_t batch_rule>
void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) {
    return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> gradient_value;
  optional<int64_t> gradient_bdim;
  if (gradient) {
    std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) {
    return at::_ops::set_data::call(self, new_data);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor new_data_value;
  optional<int64_t> new_data_bdim;
  std::tie(new_data_value, new_data_bdim) = unwrapTensorAtLevel(new_data, cur_level);
  batch_rule(self_value, self_bdim, new_data_value, new_data_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor data_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::data::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
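// In-place operators (checked with "gen_vmap_inplace_plumbing") call the batch
// rule on the unwrapped value and then return the original `self` reference,
// preserving the usual in-place calling convention.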
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::requires_grad_::call(self, requires_grad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, requires_grad);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void retain_grad_generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::retain_grad::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fw_primal::call(self, level);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
    return at::_ops::_make_dual::call(primal, tangent, level);
  }
  Tensor primal_value;
  optional<int64_t> primal_bdim;
  std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
  Tensor tangent_value;
  optional<int64_t> tangent_bdim;
  std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
  auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
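// Operators with multiple Tensor returns receive the batch rule's results as
// interleaved (value, bdim) pairs; each pair is re-wrapped with makeBatched and
// the wrapped tensors are packed back into a std::tuple, as in _unpack_dual
// below.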
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dual, cur_level)) {
    return at::_ops::_unpack_dual::call(dual, level);
  }
  Tensor dual_value;
  optional<int64_t> dual_bdim;
  std::tie(dual_value, dual_bdim) = unwrapTensorAtLevel(dual, cur_level);
  auto results = batch_rule(dual_value, dual_bdim, level);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rename_generated_plumbing(const at::Tensor & self, c10::optional<at::DimnameList> names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rename::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::align_to::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::align_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
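// TensorList arguments are passed through to the batch rule as-is, and
// operators returning a list of tensors re-wrap the result with
// makeBatchedVector, as in align_tensors below.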
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> align_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::align_tensors::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_async_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_assert_async::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(a, cur_level)) {
    return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype);
  }
  Tensor a_value;
  optional<int64_t> a_bdim;
  std::tie(a_value, a_bdim) = unwrapTensorAtLevel(a, cur_level);
  batch_rule(a_value, a_bdim, size, stride, dtype);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::refine_names::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
    return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  }
  Tensor log_probs_value;
  optional<int64_t> log_probs_bdim;
  std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  Tensor targets_value;
  optional<int64_t> targets_bdim;
  std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
    return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  }
  Tensor log_probs_value;
  optional<int64_t> log_probs_bdim;
  std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  Tensor targets_value;
  optional<int64_t> targets_bdim;
  std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  Tensor input_lengths_value;
  optional<int64_t> input_lengths_bdim;
  std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
  Tensor target_lengths_value;
  optional<int64_t> target_lengths_bdim;
  std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight_arr, cur_level)) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  }
  auto results = batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  443. template <typename batch_rule_t, batch_rule_t batch_rule>
  444. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
  445. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  446. auto maybe_layer = maybeCurrentDynamicLayer();
  447. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  448. int64_t cur_level = maybe_layer->layerId();
  449. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
  450. return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  451. }
  452. Tensor input_value;
  453. optional<int64_t> input_bdim;
  454. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  455. Tensor hx_value;
  456. optional<int64_t> hx_bdim;
  457. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  458. optional<Tensor> weight_buf_value;
  459. optional<int64_t> weight_buf_bdim;
  460. if (weight_buf) {
  461. std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level);
  462. }
  463. optional<Tensor> cx_value;
  464. optional<int64_t> cx_bdim;
  465. if (cx) {
  466. std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  467. }
  468. optional<Tensor> dropout_state_value;
  469. optional<int64_t> dropout_state_bdim;
  470. if (dropout_state) {
  471. std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  472. }
  473. auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
  474. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
  475. }
  476. template <typename batch_rule_t, batch_rule_t batch_rule>
  477. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  478. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  479. auto maybe_layer = maybeCurrentDynamicLayer();
  480. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  481. int64_t cur_level = maybe_layer->layerId();
  482. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
  483. return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  484. }
  485. Tensor input_value;
  486. optional<int64_t> input_bdim;
  487. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  488. Tensor weight_buf_value;
  489. optional<int64_t> weight_buf_bdim;
  490. std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  491. Tensor hx_value;
  492. optional<int64_t> hx_bdim;
  493. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  494. Tensor output_value;
  495. optional<int64_t> output_bdim;
  496. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  497. Tensor reserve_value;
  498. optional<int64_t> reserve_bdim;
  499. std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  500. optional<Tensor> cx_value;
  501. optional<int64_t> cx_bdim;
  502. if (cx) {
  503. std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  504. }
  505. optional<Tensor> grad_output_value;
  506. optional<int64_t> grad_output_bdim;
  507. if (grad_output) {
  508. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  509. }
  510. optional<Tensor> grad_hy_value;
  511. optional<int64_t> grad_hy_bdim;
  512. if (grad_hy) {
  513. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  514. }
  515. optional<Tensor> grad_cy_value;
  516. optional<int64_t> grad_cy_bdim;
  517. if (grad_cy) {
  518. std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  519. }
  520. optional<Tensor> dropout_state_value;
  521. optional<int64_t> dropout_state_bdim;
  522. if (dropout_state) {
  523. std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  524. }
  525. auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
  526. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
  527. }
  528. template <typename batch_rule_t, batch_rule_t batch_rule>
  529. ::std::tuple<at::Tensor,at::Tensor> _fused_dropout_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  530. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  531. auto maybe_layer = maybeCurrentDynamicLayer();
  532. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  533. int64_t cur_level = maybe_layer->layerId();
  534. if (!isBatchedAtLevel(self, cur_level)) {
  535. return at::_ops::_fused_dropout::call(self, p, generator);
  536. }
  537. Tensor self_value;
  538. optional<int64_t> self_bdim;
  539. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  540. auto results = batch_rule(self_value, self_bdim, p, generator);
  541. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  542. }
  543. template <typename batch_rule_t, batch_rule_t batch_rule>
  544. at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) {
  545. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  546. auto maybe_layer = maybeCurrentDynamicLayer();
  547. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  548. int64_t cur_level = maybe_layer->layerId();
  549. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  550. return at::_ops::_masked_scale::call(self, mask, scale);
  551. }
  552. Tensor self_value;
  553. optional<int64_t> self_bdim;
  554. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  555. Tensor mask_value;
  556. optional<int64_t> mask_bdim;
  557. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  558. auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale);
  559. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  560. }
  561. template <typename batch_rule_t, batch_rule_t batch_rule>
  562. ::std::tuple<at::Tensor,at::Tensor> native_dropout_generated_plumbing(const at::Tensor & input, double p, c10::optional<bool> train) {
  563. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  564. auto maybe_layer = maybeCurrentDynamicLayer();
  565. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  566. int64_t cur_level = maybe_layer->layerId();
  567. if (!isBatchedAtLevel(input, cur_level)) {
  568. return at::_ops::native_dropout::call(input, p, train);
  569. }
  570. Tensor input_value;
  571. optional<int64_t> input_bdim;
  572. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  573. auto results = batch_rule(input_value, input_bdim, p, train);
  574. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  575. }
  576. template <typename batch_rule_t, batch_rule_t batch_rule>
  577. at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
  578. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  579. auto maybe_layer = maybeCurrentDynamicLayer();
  580. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  581. int64_t cur_level = maybe_layer->layerId();
  582. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  583. return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
  584. }
  585. Tensor grad_output_value;
  586. optional<int64_t> grad_output_bdim;
  587. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  588. Tensor mask_value;
  589. optional<int64_t> mask_bdim;
  590. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  591. auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale);
  592. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  593. }
  594. template <typename batch_rule_t, batch_rule_t batch_rule>
  595. ::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
  596. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  597. auto maybe_layer = maybeCurrentDynamicLayer();
  598. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  599. int64_t cur_level = maybe_layer->layerId();
  600. if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
  601. return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
  602. }
  603. Tensor quasi_value;
  604. optional<int64_t> quasi_bdim;
  605. std::tie(quasi_value, quasi_bdim) = unwrapTensorAtLevel(quasi, cur_level);
  606. Tensor sobolstate_value;
  607. optional<int64_t> sobolstate_bdim;
  608. std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
  609. auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype);
  610. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  611. }
  612. template <typename batch_rule_t, batch_rule_t batch_rule>
  613. at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
  614. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  615. auto maybe_layer = maybeCurrentDynamicLayer();
  616. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  617. int64_t cur_level = maybe_layer->layerId();
  618. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
  619. return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
  620. }
  621. Tensor self_value;
  622. optional<int64_t> self_bdim;
  623. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  624. Tensor sobolstate_value;
  625. optional<int64_t> sobolstate_bdim;
  626. std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
  627. batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated);
  628. return self;
  629. }
  630. template <typename batch_rule_t, batch_rule_t batch_rule>
  631. at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
  632. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  633. auto maybe_layer = maybeCurrentDynamicLayer();
  634. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  635. int64_t cur_level = maybe_layer->layerId();
  636. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) {
  637. return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
  638. }
  639. Tensor self_value;
  640. optional<int64_t> self_bdim;
  641. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  642. Tensor ltm_value;
  643. optional<int64_t> ltm_bdim;
  644. std::tie(ltm_value, ltm_bdim) = unwrapTensorAtLevel(ltm, cur_level);
  645. batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension);
  646. return self;
  647. }
  648. template <typename batch_rule_t, batch_rule_t batch_rule>
  649. at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) {
  650. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  651. auto maybe_layer = maybeCurrentDynamicLayer();
  652. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  653. int64_t cur_level = maybe_layer->layerId();
  654. if (!isBatchedAtLevel(self, cur_level)) {
  655. return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
  656. }
  657. Tensor self_value;
  658. optional<int64_t> self_bdim;
  659. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  660. batch_rule(self_value, self_bdim, dimension);
  661. return self;
  662. }
  663. template <typename batch_rule_t, batch_rule_t batch_rule>
  664. at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) {
  665. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  666. auto maybe_layer = maybeCurrentDynamicLayer();
  667. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  668. int64_t cur_level = maybe_layer->layerId();
  669. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) {
  670. return at::_ops::_reshape_from_tensor::call(self, shape);
  671. }
  672. Tensor self_value;
  673. optional<int64_t> self_bdim;
  674. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  675. Tensor shape_value;
  676. optional<int64_t> shape_bdim;
  677. std::tie(shape_value, shape_bdim) = unwrapTensorAtLevel(shape, cur_level);
  678. auto results = batch_rule(self_value, self_bdim, shape_value, shape_bdim);
  679. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  680. }
  681. template <typename batch_rule_t, batch_rule_t batch_rule>
  682. at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) {
  683. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  684. auto maybe_layer = maybeCurrentDynamicLayer();
  685. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  686. int64_t cur_level = maybe_layer->layerId();
  687. if (!isBatchedAtLevel(self, cur_level)) {
  688. return at::_ops::_shape_as_tensor::call(self);
  689. }
  690. Tensor self_value;
  691. optional<int64_t> self_bdim;
  692. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  693. auto results = batch_rule(self_value, self_bdim);
  694. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  695. }
  696. template <typename batch_rule_t, batch_rule_t batch_rule>
  697. at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  698. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  699. auto maybe_layer = maybeCurrentDynamicLayer();
  700. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  701. int64_t cur_level = maybe_layer->layerId();
  702. if (!isBatchedAtLevel(input, cur_level)) {
  703. return at::_ops::dropout::call(input, p, train);
  704. }
  705. Tensor input_value;
  706. optional<int64_t> input_bdim;
  707. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  708. auto results = batch_rule(input_value, input_bdim, p, train);
  709. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  710. }
  711. template <typename batch_rule_t, batch_rule_t batch_rule>
  712. at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  713. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  714. auto maybe_layer = maybeCurrentDynamicLayer();
  715. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  716. int64_t cur_level = maybe_layer->layerId();
  717. if (!isBatchedAtLevel(self, cur_level)) {
  718. return at::_ops::dropout_::call(self, p, train);
  719. }
  720. Tensor self_value;
  721. optional<int64_t> self_bdim;
  722. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  723. batch_rule(self_value, self_bdim, p, train);
  724. return self;
  725. }
  726. template <typename batch_rule_t, batch_rule_t batch_rule>
  727. at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  728. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  729. auto maybe_layer = maybeCurrentDynamicLayer();
  730. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  731. int64_t cur_level = maybe_layer->layerId();
  732. if (!isBatchedAtLevel(input, cur_level)) {
  733. return at::_ops::feature_dropout::call(input, p, train);
  734. }
  735. Tensor input_value;
  736. optional<int64_t> input_bdim;
  737. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  738. auto results = batch_rule(input_value, input_bdim, p, train);
  739. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  740. }
  741. template <typename batch_rule_t, batch_rule_t batch_rule>
  742. at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  743. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  744. auto maybe_layer = maybeCurrentDynamicLayer();
  745. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  746. int64_t cur_level = maybe_layer->layerId();
  747. if (!isBatchedAtLevel(self, cur_level)) {
  748. return at::_ops::feature_dropout_::call(self, p, train);
  749. }
  750. Tensor self_value;
  751. optional<int64_t> self_bdim;
  752. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  753. batch_rule(self_value, self_bdim, p, train);
  754. return self;
  755. }
  756. template <typename batch_rule_t, batch_rule_t batch_rule>
  757. at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  758. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  759. auto maybe_layer = maybeCurrentDynamicLayer();
  760. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  761. int64_t cur_level = maybe_layer->layerId();
  762. if (!isBatchedAtLevel(input, cur_level)) {
  763. return at::_ops::alpha_dropout::call(input, p, train);
  764. }
  765. Tensor input_value;
  766. optional<int64_t> input_bdim;
  767. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  768. auto results = batch_rule(input_value, input_bdim, p, train);
  769. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  770. }
  771. template <typename batch_rule_t, batch_rule_t batch_rule>
  772. at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  773. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  774. auto maybe_layer = maybeCurrentDynamicLayer();
  775. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  776. int64_t cur_level = maybe_layer->layerId();
  777. if (!isBatchedAtLevel(self, cur_level)) {
  778. return at::_ops::alpha_dropout_::call(self, p, train);
  779. }
  780. Tensor self_value;
  781. optional<int64_t> self_bdim;
  782. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  783. batch_rule(self_value, self_bdim, p, train);
  784. return self;
  785. }
  786. template <typename batch_rule_t, batch_rule_t batch_rule>
  787. at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  788. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  789. auto maybe_layer = maybeCurrentDynamicLayer();
  790. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  791. int64_t cur_level = maybe_layer->layerId();
  792. if (!isBatchedAtLevel(input, cur_level)) {
  793. return at::_ops::feature_alpha_dropout::call(input, p, train);
  794. }
  795. Tensor input_value;
  796. optional<int64_t> input_bdim;
  797. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  798. auto results = batch_rule(input_value, input_bdim, p, train);
  799. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  800. }
  801. template <typename batch_rule_t, batch_rule_t batch_rule>
  802. at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  803. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  804. auto maybe_layer = maybeCurrentDynamicLayer();
  805. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  806. int64_t cur_level = maybe_layer->layerId();
  807. if (!isBatchedAtLevel(self, cur_level)) {
  808. return at::_ops::feature_alpha_dropout_::call(self, p, train);
  809. }
  810. Tensor self_value;
  811. optional<int64_t> self_bdim;
  812. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  813. batch_rule(self_value, self_bdim, p, train);
  814. return self;
  815. }
  816. template <typename batch_rule_t, batch_rule_t batch_rule>
  817. at::Tensor abs_generated_plumbing(const at::Tensor & self) {
  818. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  819. auto maybe_layer = maybeCurrentDynamicLayer();
  820. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  821. int64_t cur_level = maybe_layer->layerId();
  822. if (!isBatchedAtLevel(self, cur_level)) {
  823. return at::_ops::abs::call(self);
  824. }
  825. Tensor self_value;
  826. optional<int64_t> self_bdim;
  827. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  828. auto results = batch_rule(self_value, self_bdim);
  829. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  830. }
  831. template <typename batch_rule_t, batch_rule_t batch_rule>
  832. at::Tensor & abs__generated_plumbing(at::Tensor & self) {
  833. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  834. auto maybe_layer = maybeCurrentDynamicLayer();
  835. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  836. int64_t cur_level = maybe_layer->layerId();
  837. if (!isBatchedAtLevel(self, cur_level)) {
  838. return at::_ops::abs_::call(self);
  839. }
  840. Tensor self_value;
  841. optional<int64_t> self_bdim;
  842. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  843. batch_rule(self_value, self_bdim);
  844. return self;
  845. }
  846. template <typename batch_rule_t, batch_rule_t batch_rule>
  847. at::Tensor absolute_generated_plumbing(const at::Tensor & self) {
  848. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  849. auto maybe_layer = maybeCurrentDynamicLayer();
  850. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  851. int64_t cur_level = maybe_layer->layerId();
  852. if (!isBatchedAtLevel(self, cur_level)) {
  853. return at::_ops::absolute::call(self);
  854. }
  855. Tensor self_value;
  856. optional<int64_t> self_bdim;
  857. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  858. auto results = batch_rule(self_value, self_bdim);
  859. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  860. }
  861. template <typename batch_rule_t, batch_rule_t batch_rule>
  862. at::Tensor & absolute__generated_plumbing(at::Tensor & self) {
  863. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  864. auto maybe_layer = maybeCurrentDynamicLayer();
  865. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  866. int64_t cur_level = maybe_layer->layerId();
  867. if (!isBatchedAtLevel(self, cur_level)) {
  868. return at::_ops::absolute_::call(self);
  869. }
  870. Tensor self_value;
  871. optional<int64_t> self_bdim;
  872. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  873. batch_rule(self_value, self_bdim);
  874. return self;
  875. }
  876. template <typename batch_rule_t, batch_rule_t batch_rule>
  877. at::Tensor angle_generated_plumbing(const at::Tensor & self) {
  878. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  879. auto maybe_layer = maybeCurrentDynamicLayer();
  880. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  881. int64_t cur_level = maybe_layer->layerId();
  882. if (!isBatchedAtLevel(self, cur_level)) {
  883. return at::_ops::angle::call(self);
  884. }
  885. Tensor self_value;
  886. optional<int64_t> self_bdim;
  887. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  888. auto results = batch_rule(self_value, self_bdim);
  889. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  890. }
  891. template <typename batch_rule_t, batch_rule_t batch_rule>
  892. at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) {
  893. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  894. auto maybe_layer = maybeCurrentDynamicLayer();
  895. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  896. int64_t cur_level = maybe_layer->layerId();
  897. if (!isBatchedAtLevel(self, cur_level)) {
  898. return at::_ops::view_as_real::call(self);
  899. }
  900. Tensor self_value;
  901. optional<int64_t> self_bdim;
  902. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  903. auto results = batch_rule(self_value, self_bdim);
  904. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  905. }
  906. template <typename batch_rule_t, batch_rule_t batch_rule>
  907. at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) {
  908. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  909. auto maybe_layer = maybeCurrentDynamicLayer();
  910. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  911. int64_t cur_level = maybe_layer->layerId();
  912. if (!isBatchedAtLevel(self, cur_level)) {
  913. return at::_ops::view_as_complex::call(self);
  914. }
  915. Tensor self_value;
  916. optional<int64_t> self_bdim;
  917. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  918. auto results = batch_rule(self_value, self_bdim);
  919. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  920. }
  921. template <typename batch_rule_t, batch_rule_t batch_rule>
  922. at::Tensor sgn_generated_plumbing(const at::Tensor & self) {
  923. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  924. auto maybe_layer = maybeCurrentDynamicLayer();
  925. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  926. int64_t cur_level = maybe_layer->layerId();
  927. if (!isBatchedAtLevel(self, cur_level)) {
  928. return at::_ops::sgn::call(self);
  929. }
  930. Tensor self_value;
  931. optional<int64_t> self_bdim;
  932. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  933. auto results = batch_rule(self_value, self_bdim);
  934. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  935. }
  936. template <typename batch_rule_t, batch_rule_t batch_rule>
  937. at::Tensor & sgn__generated_plumbing(at::Tensor & self) {
  938. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  939. auto maybe_layer = maybeCurrentDynamicLayer();
  940. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  941. int64_t cur_level = maybe_layer->layerId();
  942. if (!isBatchedAtLevel(self, cur_level)) {
  943. return at::_ops::sgn_::call(self);
  944. }
  945. Tensor self_value;
  946. optional<int64_t> self_bdim;
  947. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  948. batch_rule(self_value, self_bdim);
  949. return self;
  950. }
  951. template <typename batch_rule_t, batch_rule_t batch_rule>
  952. at::Tensor chalf_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  953. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  954. auto maybe_layer = maybeCurrentDynamicLayer();
  955. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  956. int64_t cur_level = maybe_layer->layerId();
  957. if (!isBatchedAtLevel(self, cur_level)) {
  958. return at::_ops::chalf::call(self, memory_format);
  959. }
  960. Tensor self_value;
  961. optional<int64_t> self_bdim;
  962. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  963. auto results = batch_rule(self_value, self_bdim, memory_format);
  964. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  965. }
  966. template <typename batch_rule_t, batch_rule_t batch_rule>
  967. at::Tensor real_generated_plumbing(const at::Tensor & self) {
  968. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  969. auto maybe_layer = maybeCurrentDynamicLayer();
  970. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  971. int64_t cur_level = maybe_layer->layerId();
  972. if (!isBatchedAtLevel(self, cur_level)) {
  973. return at::_ops::real::call(self);
  974. }
  975. Tensor self_value;
  976. optional<int64_t> self_bdim;
  977. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  978. auto results = batch_rule(self_value, self_bdim);
  979. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  980. }
  981. template <typename batch_rule_t, batch_rule_t batch_rule>
  982. at::Tensor imag_generated_plumbing(const at::Tensor & self) {
  983. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  984. auto maybe_layer = maybeCurrentDynamicLayer();
  985. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  986. int64_t cur_level = maybe_layer->layerId();
  987. if (!isBatchedAtLevel(self, cur_level)) {
  988. return at::_ops::imag::call(self);
  989. }
  990. Tensor self_value;
  991. optional<int64_t> self_bdim;
  992. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  993. auto results = batch_rule(self_value, self_bdim);
  994. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  995. }
  996. template <typename batch_rule_t, batch_rule_t batch_rule>
  997. at::Tensor _conj_generated_plumbing(const at::Tensor & self) {
  998. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  999. auto maybe_layer = maybeCurrentDynamicLayer();
  1000. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1001. int64_t cur_level = maybe_layer->layerId();
  1002. if (!isBatchedAtLevel(self, cur_level)) {
  1003. return at::_ops::_conj::call(self);
  1004. }
  1005. Tensor self_value;
  1006. optional<int64_t> self_bdim;
  1007. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1008. auto results = batch_rule(self_value, self_bdim);
  1009. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1010. }
  1011. template <typename batch_rule_t, batch_rule_t batch_rule>
  1012. at::Tensor conj_generated_plumbing(const at::Tensor & self) {
  1013. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1014. auto maybe_layer = maybeCurrentDynamicLayer();
  1015. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1016. int64_t cur_level = maybe_layer->layerId();
  1017. if (!isBatchedAtLevel(self, cur_level)) {
  1018. return at::_ops::conj::call(self);
  1019. }
  1020. Tensor self_value;
  1021. optional<int64_t> self_bdim;
  1022. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1023. auto results = batch_rule(self_value, self_bdim);
  1024. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1025. }
  1026. template <typename batch_rule_t, batch_rule_t batch_rule>
  1027. at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) {
  1028. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1029. auto maybe_layer = maybeCurrentDynamicLayer();
  1030. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1031. int64_t cur_level = maybe_layer->layerId();
  1032. if (!isBatchedAtLevel(self, cur_level)) {
  1033. return at::_ops::_conj_physical::call(self);
  1034. }
  1035. Tensor self_value;
  1036. optional<int64_t> self_bdim;
  1037. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1038. auto results = batch_rule(self_value, self_bdim);
  1039. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1040. }
  1041. template <typename batch_rule_t, batch_rule_t batch_rule>
  1042. at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) {
  1043. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1044. auto maybe_layer = maybeCurrentDynamicLayer();
  1045. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1046. int64_t cur_level = maybe_layer->layerId();
  1047. if (!isBatchedAtLevel(self, cur_level)) {
  1048. return at::_ops::conj_physical::call(self);
  1049. }
  1050. Tensor self_value;
  1051. optional<int64_t> self_bdim;
  1052. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1053. auto results = batch_rule(self_value, self_bdim);
  1054. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1055. }
  1056. template <typename batch_rule_t, batch_rule_t batch_rule>
  1057. at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) {
  1058. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1059. auto maybe_layer = maybeCurrentDynamicLayer();
  1060. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1061. int64_t cur_level = maybe_layer->layerId();
  1062. if (!isBatchedAtLevel(self, cur_level)) {
  1063. return at::_ops::conj_physical_::call(self);
  1064. }
  1065. Tensor self_value;
  1066. optional<int64_t> self_bdim;
  1067. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1068. batch_rule(self_value, self_bdim);
  1069. return self;
  1070. }
  1071. template <typename batch_rule_t, batch_rule_t batch_rule>
  1072. at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) {
  1073. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1074. auto maybe_layer = maybeCurrentDynamicLayer();
  1075. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1076. int64_t cur_level = maybe_layer->layerId();
  1077. if (!isBatchedAtLevel(self, cur_level)) {
  1078. return at::_ops::resolve_conj::call(self);
  1079. }
  1080. Tensor self_value;
  1081. optional<int64_t> self_bdim;
  1082. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1083. auto results = batch_rule(self_value, self_bdim);
  1084. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1085. }
  1086. template <typename batch_rule_t, batch_rule_t batch_rule>
  1087. at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) {
  1088. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1089. auto maybe_layer = maybeCurrentDynamicLayer();
  1090. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1091. int64_t cur_level = maybe_layer->layerId();
  1092. if (!isBatchedAtLevel(self, cur_level)) {
  1093. return at::_ops::resolve_neg::call(self);
  1094. }
  1095. Tensor self_value;
  1096. optional<int64_t> self_bdim;
  1097. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1098. auto results = batch_rule(self_value, self_bdim);
  1099. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1100. }
  1101. template <typename batch_rule_t, batch_rule_t batch_rule>
  1102. at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) {
  1103. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1104. auto maybe_layer = maybeCurrentDynamicLayer();
  1105. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1106. int64_t cur_level = maybe_layer->layerId();
  1107. if (!isBatchedAtLevel(self, cur_level)) {
  1108. return at::_ops::_neg_view::call(self);
  1109. }
  1110. Tensor self_value;
  1111. optional<int64_t> self_bdim;
  1112. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1113. auto results = batch_rule(self_value, self_bdim);
  1114. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1115. }
  1116. template <typename batch_rule_t, batch_rule_t batch_rule>
  1117. at::Tensor acos_generated_plumbing(const at::Tensor & self) {
  1118. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1119. auto maybe_layer = maybeCurrentDynamicLayer();
  1120. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1121. int64_t cur_level = maybe_layer->layerId();
  1122. if (!isBatchedAtLevel(self, cur_level)) {
  1123. return at::_ops::acos::call(self);
  1124. }
  1125. Tensor self_value;
  1126. optional<int64_t> self_bdim;
  1127. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1128. auto results = batch_rule(self_value, self_bdim);
  1129. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1130. }
  1131. template <typename batch_rule_t, batch_rule_t batch_rule>
  1132. at::Tensor & acos__generated_plumbing(at::Tensor & self) {
  1133. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1134. auto maybe_layer = maybeCurrentDynamicLayer();
  1135. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1136. int64_t cur_level = maybe_layer->layerId();
  1137. if (!isBatchedAtLevel(self, cur_level)) {
  1138. return at::_ops::acos_::call(self);
  1139. }
  1140. Tensor self_value;
  1141. optional<int64_t> self_bdim;
  1142. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1143. batch_rule(self_value, self_bdim);
  1144. return self;
  1145. }
  1146. template <typename batch_rule_t, batch_rule_t batch_rule>
  1147. at::Tensor arccos_generated_plumbing(const at::Tensor & self) {
  1148. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1149. auto maybe_layer = maybeCurrentDynamicLayer();
  1150. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1151. int64_t cur_level = maybe_layer->layerId();
  1152. if (!isBatchedAtLevel(self, cur_level)) {
  1153. return at::_ops::arccos::call(self);
  1154. }
  1155. Tensor self_value;
  1156. optional<int64_t> self_bdim;
  1157. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1158. auto results = batch_rule(self_value, self_bdim);
  1159. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1160. }
  1161. template <typename batch_rule_t, batch_rule_t batch_rule>
  1162. at::Tensor & arccos__generated_plumbing(at::Tensor & self) {
  1163. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1164. auto maybe_layer = maybeCurrentDynamicLayer();
  1165. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1166. int64_t cur_level = maybe_layer->layerId();
  1167. if (!isBatchedAtLevel(self, cur_level)) {
  1168. return at::_ops::arccos_::call(self);
  1169. }
  1170. Tensor self_value;
  1171. optional<int64_t> self_bdim;
  1172. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1173. batch_rule(self_value, self_bdim);
  1174. return self;
  1175. }
  1176. template <typename batch_rule_t, batch_rule_t batch_rule>
  1177. at::Tensor avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
  1178. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1179. auto maybe_layer = maybeCurrentDynamicLayer();
  1180. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1181. int64_t cur_level = maybe_layer->layerId();
  1182. if (!isBatchedAtLevel(self, cur_level)) {
  1183. return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  1184. }
  1185. Tensor self_value;
  1186. optional<int64_t> self_bdim;
  1187. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1188. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad);
  1189. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1190. }
  1191. template <typename batch_rule_t, batch_rule_t batch_rule>
  1192. at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  1193. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1194. auto maybe_layer = maybeCurrentDynamicLayer();
  1195. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1196. int64_t cur_level = maybe_layer->layerId();
  1197. if (!isBatchedAtLevel(self, cur_level)) {
  1198. return at::_ops::adaptive_avg_pool1d::call(self, output_size);
  1199. }
  1200. Tensor self_value;
  1201. optional<int64_t> self_bdim;
  1202. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1203. auto results = batch_rule(self_value, self_bdim, output_size);
  1204. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1205. }
  1206. template <typename batch_rule_t, batch_rule_t batch_rule>
  1207. ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  1208. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1209. auto maybe_layer = maybeCurrentDynamicLayer();
  1210. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1211. int64_t cur_level = maybe_layer->layerId();
  1212. if (!isBatchedAtLevel(self, cur_level)) {
  1213. return at::_ops::adaptive_max_pool1d::call(self, output_size);
  1214. }
  1215. Tensor self_value;
  1216. optional<int64_t> self_bdim;
  1217. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1218. auto results = batch_rule(self_value, self_bdim, output_size);
  1219. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  1220. }
  1221. template <typename batch_rule_t, batch_rule_t batch_rule>
  1222. at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  1223. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1224. auto maybe_layer = maybeCurrentDynamicLayer();
  1225. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1226. int64_t cur_level = maybe_layer->layerId();
  1227. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  1228. return at::_ops::add_Tensor::call(self, other, alpha);
  1229. }
  1230. Tensor self_value;
  1231. optional<int64_t> self_bdim;
  1232. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1233. Tensor other_value;
  1234. optional<int64_t> other_bdim;
  1235. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  1236. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  1237. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1238. }
  1239. template <typename batch_rule_t, batch_rule_t batch_rule>
  1240. at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  1241. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1242. auto maybe_layer = maybeCurrentDynamicLayer();
  1243. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1244. int64_t cur_level = maybe_layer->layerId();
  1245. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  1246. return at::_ops::add__Tensor::call(self, other, alpha);
  1247. }
  1248. Tensor self_value;
  1249. optional<int64_t> self_bdim;
  1250. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1251. Tensor other_value;
  1252. optional<int64_t> other_bdim;
  1253. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  1254. batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  1255. return self;
  1256. }
  1257. template <typename batch_rule_t, batch_rule_t batch_rule>
  1258. at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  1259. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1260. auto maybe_layer = maybeCurrentDynamicLayer();
  1261. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1262. int64_t cur_level = maybe_layer->layerId();
  1263. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  1264. return at::_ops::_add_relu_Tensor::call(self, other, alpha);
  1265. }
  1266. Tensor self_value;
  1267. optional<int64_t> self_bdim;
  1268. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1269. Tensor other_value;
  1270. optional<int64_t> other_bdim;
  1271. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  1272. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  1273. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1274. }
  1275. template <typename batch_rule_t, batch_rule_t batch_rule>
  1276. at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  1277. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1278. auto maybe_layer = maybeCurrentDynamicLayer();
  1279. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1280. int64_t cur_level = maybe_layer->layerId();
  1281. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  1282. return at::_ops::_add_relu__Tensor::call(self, other, alpha);
  1283. }
  1284. Tensor self_value;
  1285. optional<int64_t> self_bdim;
  1286. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1287. Tensor other_value;
  1288. optional<int64_t> other_bdim;
  1289. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  1290. batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  1291. return self;
  1292. }
  1293. template <typename batch_rule_t, batch_rule_t batch_rule>
  1294. at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  1295. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1296. auto maybe_layer = maybeCurrentDynamicLayer();
  1297. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1298. int64_t cur_level = maybe_layer->layerId();
  1299. if (!isBatchedAtLevel(self, cur_level)) {
  1300. return at::_ops::_add_relu_Scalar::call(self, other, alpha);
  1301. }
  1302. Tensor self_value;
  1303. optional<int64_t> self_bdim;
  1304. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1305. auto results = batch_rule(self_value, self_bdim, other, alpha);
  1306. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1307. }
  1308. template <typename batch_rule_t, batch_rule_t batch_rule>
  1309. at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  1310. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1311. auto maybe_layer = maybeCurrentDynamicLayer();
  1312. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1313. int64_t cur_level = maybe_layer->layerId();
  1314. if (!isBatchedAtLevel(self, cur_level)) {
  1315. return at::_ops::_add_relu__Scalar::call(self, other, alpha);
  1316. }
  1317. Tensor self_value;
  1318. optional<int64_t> self_bdim;
  1319. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1320. batch_rule(self_value, self_bdim, other, alpha);
  1321. return self;
  1322. }
  1323. template <typename batch_rule_t, batch_rule_t batch_rule>
  1324. at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  1325. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1326. auto maybe_layer = maybeCurrentDynamicLayer();
  1327. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1328. int64_t cur_level = maybe_layer->layerId();
  1329. if (!isBatchedAtLevel(self, cur_level)) {
  1330. return at::_ops::add_Scalar::call(self, other, alpha);
  1331. }
  1332. Tensor self_value;
  1333. optional<int64_t> self_bdim;
  1334. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1335. auto results = batch_rule(self_value, self_bdim, other, alpha);
  1336. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1337. }
  1338. template <typename batch_rule_t, batch_rule_t batch_rule>
  1339. at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  1340. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1341. auto maybe_layer = maybeCurrentDynamicLayer();
  1342. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1343. int64_t cur_level = maybe_layer->layerId();
  1344. if (!isBatchedAtLevel(self, cur_level)) {
  1345. return at::_ops::add__Scalar::call(self, other, alpha);
  1346. }
  1347. Tensor self_value;
  1348. optional<int64_t> self_bdim;
  1349. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1350. batch_rule(self_value, self_bdim, other, alpha);
  1351. return self;
  1352. }
  1353. template <typename batch_rule_t, batch_rule_t batch_rule>
  1354. at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  1355. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1356. auto maybe_layer = maybeCurrentDynamicLayer();
  1357. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  1358. int64_t cur_level = maybe_layer->layerId();
  1359. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
  1360. return at::_ops::addmv::call(self, mat, vec, beta, alpha);
  1361. }
  1362. Tensor self_value;
  1363. optional<int64_t> self_bdim;
  1364. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1365. Tensor mat_value;
  1366. optional<int64_t> mat_bdim;
  1367. std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
  1368. Tensor vec_value;
  1369. optional<int64_t> vec_bdim;
  1370. std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
  1371. auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
  1372. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  1373. }
  1374. template <typename batch_rule_t, batch_rule_t batch_rule>
  1375. at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  1376. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  1377. auto maybe_layer = maybeCurrentDynamicLayer();
  1378. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  1379. int64_t cur_level = maybe_layer->layerId();
  1380. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
  1381. return at::_ops::addmv_::call(self, mat, vec, beta, alpha);
  1382. }
  1383. Tensor self_value;
  1384. optional<int64_t> self_bdim;
  1385. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  1386. Tensor mat_value;
  1387. optional<int64_t> mat_bdim;
  1388. std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
  1389. Tensor vec_value;
  1390. optional<int64_t> vec_bdim;
  1391. std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
  1392. batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
  1393. return self;
  1394. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::addr::call(self, vec1, vec2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor vec1_value;
  optional<int64_t> vec1_bdim;
  std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
  Tensor vec2_value;
  optional<int64_t> vec2_bdim;
  std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::addr_::call(self, vec1, vec2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor vec1_value;
  optional<int64_t> vec1_bdim;
  std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
  Tensor vec2_value;
  optional<int64_t> vec2_bdim;
  std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
  batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(theta, cur_level)) {
    return at::_ops::affine_grid_generator::call(theta, size, align_corners);
  }
  Tensor theta_value;
  optional<int64_t> theta_bdim;
  std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
  auto results = batch_rule(theta_value, theta_bdim, size, align_corners);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, size, align_corners);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_is_all_true::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_is_any_true::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_test_check_tensor::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all_dimname::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any_dimname::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(like, cur_level)) {
    return at::_ops::_dim_arange::call(like, dim);
  }
  Tensor like_value;
  optional<int64_t> like_bdim;
  std::tie(like_value, like_bdim) = unwrapTensorAtLevel(like, cur_level);
  auto results = batch_rule(like_value, like_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor argmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argmax::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor argmin_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::argmin::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor acosh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acosh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & acosh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acosh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arccosh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccosh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arccosh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccosh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor asinh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asinh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & asinh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asinh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsinh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsinh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atanh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atanh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & atanh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atanh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arctanh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctanh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arctanh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctanh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::as_strided::call(self, size, stride, storage_offset);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor asin_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asin::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & asin__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::asin_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arcsin_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsin::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arcsin__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arcsin_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atan_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atan::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & atan__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atan_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arctan_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctan::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arctan__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arctan_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atleast_1d_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atleast_1d::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
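// TensorList overloads forward the (possibly batched) list directly to the batch
// rule and re-wrap each output tensor at the current level with makeBatchedVector.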
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::atleast_1d_Sequence::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atleast_2d::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::atleast_2d_Sequence::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::atleast_3d::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::atleast_3d_Sequence::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor batch1_value;
  optional<int64_t> batch1_bdim;
  std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
  Tensor batch2_value;
  optional<int64_t> batch2_bdim;
  std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor batch1_value;
  optional<int64_t> batch1_bdim;
  std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
  Tensor batch2_value;
  optional<int64_t> batch2_bdim;
  std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
  batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return self;
}
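// Operators taking c10::optional<Tensor> arguments (e.g. weight, bias, running
// stats) only unwrap an argument when it is present; absent optionals are passed
// to the batch rule as empty optional<Tensor>/optional<int64_t> pairs.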
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) {
    return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor var_value;
  optional<int64_t> var_bdim;
  std::tie(var_value, var_bdim) = unwrapTensorAtLevel(var, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
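// Multi-output operators re-wrap each (value, bdim) pair returned by the batch
// rule and assemble the wrapped tensors into a std::tuple via std::make_tuple.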
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) {
    return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor reservedSpace_value;
  optional<int64_t> reservedSpace_bdim;
  std::tie(reservedSpace_value, reservedSpace_bdim) = unwrapTensorAtLevel(reservedSpace, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  optional<Tensor> save_mean_value;
  optional<int64_t> save_mean_bdim;
  if (save_mean) {
    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  optional<Tensor> save_var_transform_value;
  optional<int64_t> save_var_transform_bdim;
  if (save_var_transform) {
    std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level);
  }
  auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bernoulli::call(self, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
    return at::_ops::bernoulli__Tensor::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor p_value;
  optional<int64_t> p_bdim;
  std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
  batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bernoulli__float::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bernoulli_p::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::bilinear::call(input1, input2, weight, bias);
  }
  Tensor input1_value;
  optional<int64_t> input1_bdim;
  std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
  Tensor input2_value;
  optional<int64_t> input2_bdim;
  std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(pos_weight, cur_level)) {
    return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> pos_weight_value;
  optional<int64_t> pos_weight_bdim;
  if (pos_weight) {
    std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bincount_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
    return at::_ops::bincount::call(self, weights, minlength);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> weights_value;
  optional<int64_t> weights_bdim;
  if (weights) {
    std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  2353. template <typename batch_rule_t, batch_rule_t batch_rule>
  2354. at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) {
  2355. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  2356. auto maybe_layer = maybeCurrentDynamicLayer();
  2357. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  2358. int64_t cur_level = maybe_layer->layerId();
  2359. if (!isBatchedAtLevel(self, cur_level)) {
  2360. return at::_ops::bitwise_not::call(self);
  2361. }
  2362. Tensor self_value;
  2363. optional<int64_t> self_bdim;
  2364. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  2365. auto results = batch_rule(self_value, self_bdim);
  2366. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  2367. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_not_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::copysign_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::copysign__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
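// Non-tensor arguments (Scalars, integer values, array refs, flags) carry no batch
// dimension, so overloads like copysign.Scalar below forward them to the batch rule
// unchanged; only tensor arguments are unwrapped into (value, bdim) pairs.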
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::copysign_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::copysign__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_not_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logical_not::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_not__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logical_not_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_xor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_xor_::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_and::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_and_::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_or::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logical_or_::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::bmm::call(self, mat2);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
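// Operators that take a TensorList and/or return a vector of tensors, such as
// broadcast_tensors below, hand the list to the batch rule as-is and re-wrap the
// resulting vector element-wise via makeBatchedVector.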
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> broadcast_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::broadcast_tensors::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::broadcast_to::call(self, size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sparse_broadcast_to::call(self, size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::cat::call(tensors, dim);
  }
  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::cat_names::call(tensors, dim);
  }
  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concat::call(tensors, dim);
  }
  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concat_names::call(tensors, dim);
  }
  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concatenate::call(tensors, dim);
  }
  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::concatenate_names::call(tensors, dim);
  }
  auto results = batch_rule(tensors, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor block_diag_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::block_diag::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ceil_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ceil::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ceil__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ceil_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(matrices, cur_level)) {
    return at::_ops::chain_matmul::call(matrices);
  }
  auto results = batch_rule(matrices);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsafe_chunk::call(self, chunks, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, chunks, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::chunk::call(self, chunks, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, chunks, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tensor_split_sections::call(self, sections, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tensor_split_indices::call(self, indices, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) {
    return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor tensor_indices_or_sections_value;
  optional<int64_t> tensor_indices_or_sections_bdim;
  std::tie(tensor_indices_or_sections_value, tensor_indices_or_sections_bdim) = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level);
  auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
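// Optional tensor arguments (e.g. the min/max tensors of clamp.Tensor below) are
// unwrapped only when present; otherwise their (value, bdim) pair stays nullopt,
// and the batch rule receives the optionals directly.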
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_Tensor::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> min_value;
  optional<int64_t> min_bdim;
  if (min) {
    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  optional<Tensor> max_value;
  optional<int64_t> max_bdim;
  if (max) {
    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min, max);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp__Tensor::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> min_value;
  optional<int64_t> min_bdim;
  if (min) {
    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  optional<Tensor> max_value;
  optional<int64_t> max_bdim;
  if (max) {
    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_max::call(self, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_max_Tensor::call(self, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor max_value;
  optional<int64_t> max_bdim;
  std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
  auto results = batch_rule(self_value, self_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_max_::call(self, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, max);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_max__Tensor::call(self, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor max_value;
  optional<int64_t> max_bdim;
  std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
  batch_rule(self_value, self_bdim, max_value, max_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_min::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
    return at::_ops::clamp_min_Tensor::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor min_value;
  optional<int64_t> min_bdim;
  std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_min_::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
    return at::_ops::clamp_min__Tensor::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor min_value;
  optional<int64_t> min_bdim;
  std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
  batch_rule(self_value, self_bdim, min_value, min_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clip_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clip::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clip_Tensor::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> min_value;
  optional<int64_t> min_bdim;
  if (min) {
    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  optional<Tensor> max_value;
  optional<int64_t> max_bdim;
  if (max) {
    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clip__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clip_::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min, max);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clip__Tensor::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> min_value;
  optional<int64_t> min_bdim;
  if (min) {
    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  optional<Tensor> max_value;
  optional<int64_t> max_bdim;
  if (max) {
    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) {
    return at::_ops::complex::call(real, imag);
  }
  Tensor real_value;
  optional<int64_t> real_bdim;
  std::tie(real_value, real_bdim) = unwrapTensorAtLevel(real, cur_level);
  Tensor imag_value;
  optional<int64_t> imag_bdim;
  std::tie(imag_value, imag_bdim) = unwrapTensorAtLevel(imag, cur_level);
  auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) {
    return at::_ops::polar::call(abs, angle);
  }
  Tensor abs_value;
  optional<int64_t> abs_bdim;
  std::tie(abs_value, abs_bdim) = unwrapTensorAtLevel(abs, cur_level);
  Tensor angle_value;
  optional<int64_t> angle_bdim;
  std::tie(angle_value, angle_bdim) = unwrapTensorAtLevel(angle, cur_level);
  auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::constant_pad_nd::call(self, pad, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::contiguous::call(self, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
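// Multi-output operators such as convolution_backward below receive a flat tuple of
// alternating (tensor, bdim) entries from the batch rule; each pair is re-wrapped
// with makeBatched and packed into the returned std::tuple.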
  3232. template <typename batch_rule_t, batch_rule_t batch_rule>
  3233. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  3234. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  3235. auto maybe_layer = maybeCurrentDynamicLayer();
  3236. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  3237. int64_t cur_level = maybe_layer->layerId();
  3238. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  3239. return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  3240. }
  3241. Tensor grad_output_value;
  3242. optional<int64_t> grad_output_bdim;
  3243. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  3244. Tensor input_value;
  3245. optional<int64_t> input_bdim;
  3246. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  3247. Tensor weight_value;
  3248. optional<int64_t> weight_bdim;
  3249. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  3250. auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  3251. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  3252. }
  3253. template <typename batch_rule_t, batch_rule_t batch_rule>
  3254. at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
  3255. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  3256. auto maybe_layer = maybeCurrentDynamicLayer();
  3257. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  3258. int64_t cur_level = maybe_layer->layerId();
  3259. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  3260. return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
  3261. }
  3262. Tensor input_value;
  3263. optional<int64_t> input_bdim;
  3264. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  3265. Tensor weight_value;
  3266. optional<int64_t> weight_bdim;
  3267. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  3268. optional<Tensor> bias_value;
  3269. optional<int64_t> bias_bdim;
  3270. if (bias) {
  3271. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  3272. }
  3273. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
  3274. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  3275. }
  3276. template <typename batch_rule_t, batch_rule_t batch_rule>
  3277. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  3278. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  3279. auto maybe_layer = maybeCurrentDynamicLayer();
  3280. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  3281. int64_t cur_level = maybe_layer->layerId();
  3282. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  3283. return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  3284. }
  3285. Tensor grad_output_value;
  3286. optional<int64_t> grad_output_bdim;
  3287. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  3288. Tensor input_value;
  3289. optional<int64_t> input_bdim;
  3290. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  3291. Tensor weight_value;
  3292. optional<int64_t> weight_bdim;
  3293. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  3294. auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  3295. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  3296. }
  3297. template <typename batch_rule_t, batch_rule_t batch_rule>
  3298. at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
  3299. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  3300. auto maybe_layer = maybeCurrentDynamicLayer();
  3301. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  3302. int64_t cur_level = maybe_layer->layerId();
  3303. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  3304. return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
  3305. }
  3306. Tensor input_value;
  3307. optional<int64_t> input_bdim;
  3308. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  3309. Tensor weight_value;
  3310. optional<int64_t> weight_bdim;
  3311. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  3312. optional<Tensor> bias_value;
  3313. optional<int64_t> bias_bdim;
  3314. if (bias) {
  3315. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  3316. }
  3317. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
  3318. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  3319. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_generated_plumbing(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  Tensor gO_value;
  optional<int64_t> gO_bdim;
  std::tie(gO_value, gO_bdim) = unwrapTensorAtLevel(gO, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> ggI_value;
  optional<int64_t> ggI_bdim;
  if (ggI) {
    std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level);
  }
  optional<Tensor> ggW_value;
  optional<int64_t> ggW_bdim;
  if (ggW) {
    std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level);
  }
  optional<Tensor> ggb_value;
  optional<int64_t> ggb_bdim;
  if (ggb) {
    std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level);
  }
  auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_tbc::call(self, weight, bias, pad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy::call(self, src, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
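// In-place ("_") variants below follow the "gen_vmap_inplace_plumbing" pattern:
// the batch rule mutates the unwrapped self tensor in place, so the wrapper
// returns self directly instead of re-wrapping a result with makeBatched.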
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy_::call(self, src, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
    return at::_ops::_copy_from::call(self, dst, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor dst_value;
  optional<int64_t> dst_bdim;
  std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
  auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
    return at::_ops::_copy_from_and_resize::call(self, dst);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor dst_value;
  optional<int64_t> dst_bdim;
  std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
  auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cos_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cos::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cos__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cos_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cosh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cosh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cosh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cosh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
  }
  Tensor input1_value;
  optional<int64_t> input1_bdim;
  std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
  Tensor input2_value;
  optional<int64_t> input2_bdim;
  std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::count_nonzero_dim_IntList::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::count_nonzero::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
    return at::_ops::cov::call(self, correction, fweights, aweights);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> fweights_value;
  optional<int64_t> fweights_bdim;
  if (fweights) {
    std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level);
  }
  optional<Tensor> aweights_value;
  optional<int64_t> aweights_bdim;
  if (aweights) {
    std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::corrcoef::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(theta, cur_level)) {
    return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
  }
  Tensor theta_value;
  optional<int64_t> theta_bdim;
  std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
  auto results = batch_rule(theta_value, theta_bdim, N, C, H, W);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, N, C, H, W);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) {
    return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor reserveSpace_value;
  optional<int64_t> reserveSpace_bdim;
  std::tie(reserveSpace_value, reserveSpace_bdim) = unwrapTensorAtLevel(reserveSpace, cur_level);
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  optional<Tensor> save_mean_value;
  optional<int64_t> save_mean_bdim;
  if (save_mean) {
    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  optional<Tensor> save_var_value;
  optional<int64_t> save_var_bdim;
  if (save_var) {
    std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor z_value;
  optional<int64_t> z_bdim;
  std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
    return at::_ops::cudnn_grid_sampler::call(self, grid);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grid_value;
  optional<int64_t> grid_bdim;
  std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grid_value;
  optional<int64_t> grid_bdim;
  std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim, grad_output_value, grad_output_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummax_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummax::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummax_dimname::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::_cummax_helper::call(self, values, indices, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummin_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummin::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cummin_dimname::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::_cummin_helper::call(self, values, indices, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cumprod::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  4285. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4286. auto maybe_layer = maybeCurrentDynamicLayer();
  4287. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4288. int64_t cur_level = maybe_layer->layerId();
  4289. if (!isBatchedAtLevel(self, cur_level)) {
  4290. return at::_ops::cumprod_::call(self, dim, dtype);
  4291. }
  4292. Tensor self_value;
  4293. optional<int64_t> self_bdim;
  4294. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4295. batch_rule(self_value, self_bdim, dim, dtype);
  4296. return self;
  4297. }
  4298. template <typename batch_rule_t, batch_rule_t batch_rule>
  4299. at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  4300. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4301. auto maybe_layer = maybeCurrentDynamicLayer();
  4302. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4303. int64_t cur_level = maybe_layer->layerId();
  4304. if (!isBatchedAtLevel(self, cur_level)) {
  4305. return at::_ops::cumprod_dimname::call(self, dim, dtype);
  4306. }
  4307. Tensor self_value;
  4308. optional<int64_t> self_bdim;
  4309. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4310. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  4311. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4312. }
  4313. template <typename batch_rule_t, batch_rule_t batch_rule>
  4314. at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  4315. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4316. auto maybe_layer = maybeCurrentDynamicLayer();
  4317. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4318. int64_t cur_level = maybe_layer->layerId();
  4319. if (!isBatchedAtLevel(self, cur_level)) {
  4320. return at::_ops::cumprod__dimname::call(self, dim, dtype);
  4321. }
  4322. Tensor self_value;
  4323. optional<int64_t> self_bdim;
  4324. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4325. batch_rule(self_value, self_bdim, dim, dtype);
  4326. return self;
  4327. }
  4328. template <typename batch_rule_t, batch_rule_t batch_rule>
  4329. at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
  4330. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4331. auto maybe_layer = maybeCurrentDynamicLayer();
  4332. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4333. int64_t cur_level = maybe_layer->layerId();
  4334. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
  4335. return at::_ops::cumprod_backward::call(grad, input, dim, output);
  4336. }
  4337. Tensor grad_value;
  4338. optional<int64_t> grad_bdim;
  4339. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  4340. Tensor input_value;
  4341. optional<int64_t> input_bdim;
  4342. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  4343. Tensor output_value;
  4344. optional<int64_t> output_bdim;
  4345. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  4346. auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim);
  4347. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4348. }
  4349. template <typename batch_rule_t, batch_rule_t batch_rule>
  4350. at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  4351. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4352. auto maybe_layer = maybeCurrentDynamicLayer();
  4353. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4354. int64_t cur_level = maybe_layer->layerId();
  4355. if (!isBatchedAtLevel(self, cur_level)) {
  4356. return at::_ops::cumsum::call(self, dim, dtype);
  4357. }
  4358. Tensor self_value;
  4359. optional<int64_t> self_bdim;
  4360. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4361. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  4362. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4363. }
  4364. template <typename batch_rule_t, batch_rule_t batch_rule>
  4365. at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  4366. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4367. auto maybe_layer = maybeCurrentDynamicLayer();
  4368. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4369. int64_t cur_level = maybe_layer->layerId();
  4370. if (!isBatchedAtLevel(self, cur_level)) {
  4371. return at::_ops::cumsum_::call(self, dim, dtype);
  4372. }
  4373. Tensor self_value;
  4374. optional<int64_t> self_bdim;
  4375. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4376. batch_rule(self_value, self_bdim, dim, dtype);
  4377. return self;
  4378. }
  4379. template <typename batch_rule_t, batch_rule_t batch_rule>
  4380. at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  4381. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4382. auto maybe_layer = maybeCurrentDynamicLayer();
  4383. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4384. int64_t cur_level = maybe_layer->layerId();
  4385. if (!isBatchedAtLevel(self, cur_level)) {
  4386. return at::_ops::cumsum_dimname::call(self, dim, dtype);
  4387. }
  4388. Tensor self_value;
  4389. optional<int64_t> self_bdim;
  4390. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4391. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  4392. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4393. }
  4394. template <typename batch_rule_t, batch_rule_t batch_rule>
  4395. at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  4396. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4397. auto maybe_layer = maybeCurrentDynamicLayer();
  4398. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4399. int64_t cur_level = maybe_layer->layerId();
  4400. if (!isBatchedAtLevel(self, cur_level)) {
  4401. return at::_ops::cumsum__dimname::call(self, dim, dtype);
  4402. }
  4403. Tensor self_value;
  4404. optional<int64_t> self_bdim;
  4405. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4406. batch_rule(self_value, self_bdim, dim, dtype);
  4407. return self;
  4408. }
  4409. template <typename batch_rule_t, batch_rule_t batch_rule>
  4410. at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  4411. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4412. auto maybe_layer = maybeCurrentDynamicLayer();
  4413. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4414. int64_t cur_level = maybe_layer->layerId();
  4415. if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
  4416. return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
  4417. }
  4418. Tensor y_value;
  4419. optional<int64_t> y_bdim;
  4420. std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  4421. Tensor x_value;
  4422. optional<int64_t> x_bdim;
  4423. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  4424. auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
  4425. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4426. }
  4427. template <typename batch_rule_t, batch_rule_t batch_rule>
  4428. at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  4429. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4430. auto maybe_layer = maybeCurrentDynamicLayer();
  4431. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4432. int64_t cur_level = maybe_layer->layerId();
  4433. if (!isBatchedAtLevel(y, cur_level)) {
  4434. return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
  4435. }
  4436. Tensor y_value;
  4437. optional<int64_t> y_bdim;
  4438. std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  4439. auto results = batch_rule(y_value, y_bdim, dx, dim);
  4440. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4441. }
  4442. template <typename batch_rule_t, batch_rule_t batch_rule>
  4443. at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  4444. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4445. auto maybe_layer = maybeCurrentDynamicLayer();
  4446. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4447. int64_t cur_level = maybe_layer->layerId();
  4448. if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
  4449. return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
  4450. }
  4451. Tensor log_probs_value;
  4452. optional<int64_t> log_probs_bdim;
  4453. std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  4454. Tensor targets_value;
  4455. optional<int64_t> targets_bdim;
  4456. std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  4457. auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity);
  4458. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4459. }
  4460. template <typename batch_rule_t, batch_rule_t batch_rule>
  4461. at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
  4462. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4463. auto maybe_layer = maybeCurrentDynamicLayer();
  4464. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4465. int64_t cur_level = maybe_layer->layerId();
  4466. if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
  4467. return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
  4468. }
  4469. Tensor log_probs_value;
  4470. optional<int64_t> log_probs_bdim;
  4471. std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  4472. Tensor targets_value;
  4473. optional<int64_t> targets_bdim;
  4474. std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  4475. Tensor input_lengths_value;
  4476. optional<int64_t> input_lengths_bdim;
  4477. std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
  4478. Tensor target_lengths_value;
  4479. optional<int64_t> target_lengths_bdim;
  4480. std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
  4481. auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity);
  4482. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4483. }
  4484. template <typename batch_rule_t, batch_rule_t batch_rule>
  4485. ::std::tuple<at::Tensor,at::Tensor> _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
  4486. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4487. auto maybe_layer = maybeCurrentDynamicLayer();
  4488. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4489. int64_t cur_level = maybe_layer->layerId();
  4490. if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
  4491. return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
  4492. }
  4493. Tensor log_probs_value;
  4494. optional<int64_t> log_probs_bdim;
  4495. std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  4496. Tensor targets_value;
  4497. optional<int64_t> targets_bdim;
  4498. std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  4499. auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity);
  4500. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  4501. }
  4502. template <typename batch_rule_t, batch_rule_t batch_rule>
  4503. ::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
  4504. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4505. auto maybe_layer = maybeCurrentDynamicLayer();
  4506. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4507. int64_t cur_level = maybe_layer->layerId();
  4508. if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
  4509. return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
  4510. }
  4511. Tensor log_probs_value;
  4512. optional<int64_t> log_probs_bdim;
  4513. std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  4514. Tensor targets_value;
  4515. optional<int64_t> targets_bdim;
  4516. std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  4517. Tensor input_lengths_value;
  4518. optional<int64_t> input_lengths_bdim;
  4519. std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
  4520. Tensor target_lengths_value;
  4521. optional<int64_t> target_lengths_bdim;
  4522. std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
  4523. auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity);
  4524. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  4525. }
  4526. template <typename batch_rule_t, batch_rule_t batch_rule>
  4527. at::Tensor _ctc_loss_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
  4528. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4529. auto maybe_layer = maybeCurrentDynamicLayer();
  4530. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4531. int64_t cur_level = maybe_layer->layerId();
  4532. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
  4533. return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
  4534. }
  4535. Tensor grad_value;
  4536. optional<int64_t> grad_bdim;
  4537. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  4538. Tensor log_probs_value;
  4539. optional<int64_t> log_probs_bdim;
  4540. std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  4541. Tensor targets_value;
  4542. optional<int64_t> targets_bdim;
  4543. std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  4544. Tensor neg_log_likelihood_value;
  4545. optional<int64_t> neg_log_likelihood_bdim;
  4546. std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
  4547. Tensor log_alpha_value;
  4548. optional<int64_t> log_alpha_bdim;
  4549. std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
  4550. auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
  4551. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4552. }
  4553. template <typename batch_rule_t, batch_rule_t batch_rule>
  4554. at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
  4555. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4556. auto maybe_layer = maybeCurrentDynamicLayer();
  4557. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4558. int64_t cur_level = maybe_layer->layerId();
  4559. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
  4560. return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
  4561. }
  4562. Tensor grad_value;
  4563. optional<int64_t> grad_bdim;
  4564. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  4565. Tensor log_probs_value;
  4566. optional<int64_t> log_probs_bdim;
  4567. std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  4568. Tensor targets_value;
  4569. optional<int64_t> targets_bdim;
  4570. std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  4571. Tensor input_lengths_value;
  4572. optional<int64_t> input_lengths_bdim;
  4573. std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
  4574. Tensor target_lengths_value;
  4575. optional<int64_t> target_lengths_bdim;
  4576. std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
  4577. Tensor neg_log_likelihood_value;
  4578. optional<int64_t> neg_log_likelihood_bdim;
  4579. std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
  4580. Tensor log_alpha_value;
  4581. optional<int64_t> log_alpha_bdim;
  4582. std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
  4583. auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
  4584. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4585. }
  4586. template <typename batch_rule_t, batch_rule_t batch_rule>
  4587. at::Tensor diag_embed_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  4588. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4589. auto maybe_layer = maybeCurrentDynamicLayer();
  4590. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4591. int64_t cur_level = maybe_layer->layerId();
  4592. if (!isBatchedAtLevel(self, cur_level)) {
  4593. return at::_ops::diag_embed::call(self, offset, dim1, dim2);
  4594. }
  4595. Tensor self_value;
  4596. optional<int64_t> self_bdim;
  4597. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4598. auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
  4599. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4600. }
  4601. template <typename batch_rule_t, batch_rule_t batch_rule>
  4602. at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) {
  4603. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4604. auto maybe_layer = maybeCurrentDynamicLayer();
  4605. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4606. int64_t cur_level = maybe_layer->layerId();
  4607. if (!isBatchedAtLevel(self, cur_level)) {
  4608. return at::_ops::diagflat::call(self, offset);
  4609. }
  4610. Tensor self_value;
  4611. optional<int64_t> self_bdim;
  4612. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4613. auto results = batch_rule(self_value, self_bdim, offset);
  4614. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4615. }
  4616. template <typename batch_rule_t, batch_rule_t batch_rule>
  4617. at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  4618. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4619. auto maybe_layer = maybeCurrentDynamicLayer();
  4620. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4621. int64_t cur_level = maybe_layer->layerId();
  4622. if (!isBatchedAtLevel(self, cur_level)) {
  4623. return at::_ops::diagonal::call(self, offset, dim1, dim2);
  4624. }
  4625. Tensor self_value;
  4626. optional<int64_t> self_bdim;
  4627. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4628. auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
  4629. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4630. }
  4631. template <typename batch_rule_t, batch_rule_t batch_rule>
  4632. at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
  4633. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4634. auto maybe_layer = maybeCurrentDynamicLayer();
  4635. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4636. int64_t cur_level = maybe_layer->layerId();
  4637. if (!isBatchedAtLevel(A, cur_level)) {
  4638. return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
  4639. }
  4640. Tensor A_value;
  4641. optional<int64_t> A_bdim;
  4642. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  4643. auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2);
  4644. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4645. }
  4646. template <typename batch_rule_t, batch_rule_t batch_rule>
  4647. at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
  4648. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4649. auto maybe_layer = maybeCurrentDynamicLayer();
  4650. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4651. int64_t cur_level = maybe_layer->layerId();
  4652. if (!isBatchedAtLevel(self, cur_level)) {
  4653. return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
  4654. }
  4655. Tensor self_value;
  4656. optional<int64_t> self_bdim;
  4657. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4658. auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset);
  4659. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4660. }
  4661. template <typename batch_rule_t, batch_rule_t batch_rule>
  4662. at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
  4663. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4664. auto maybe_layer = maybeCurrentDynamicLayer();
  4665. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4666. int64_t cur_level = maybe_layer->layerId();
  4667. if (!isBatchedAtLevel(grad_output, cur_level)) {
  4668. return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
  4669. }
  4670. Tensor grad_output_value;
  4671. optional<int64_t> grad_output_bdim;
  4672. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  4673. auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2);
  4674. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4675. }
  4676. template <typename batch_rule_t, batch_rule_t batch_rule>
  4677. at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
  4678. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4679. auto maybe_layer = maybeCurrentDynamicLayer();
  4680. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4681. int64_t cur_level = maybe_layer->layerId();
  4682. if (!isBatchedAtLevel(self, cur_level)) {
  4683. return at::_ops::fill_diagonal_::call(self, fill_value, wrap);
  4684. }
  4685. Tensor self_value;
  4686. optional<int64_t> self_bdim;
  4687. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4688. batch_rule(self_value, self_bdim, fill_value, wrap);
  4689. return self;
  4690. }
  4691. template <typename batch_rule_t, batch_rule_t batch_rule>
  4692. at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
  4693. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4694. auto maybe_layer = maybeCurrentDynamicLayer();
  4695. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4696. int64_t cur_level = maybe_layer->layerId();
  4697. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) {
  4698. return at::_ops::diff::call(self, n, dim, prepend, append);
  4699. }
  4700. Tensor self_value;
  4701. optional<int64_t> self_bdim;
  4702. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4703. optional<Tensor> prepend_value;
  4704. optional<int64_t> prepend_bdim;
  4705. if (prepend) {
  4706. std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level);
  4707. }
  4708. optional<Tensor> append_value;
  4709. optional<int64_t> append_bdim;
  4710. if (append) {
  4711. std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level);
  4712. }
  4713. auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim);
  4714. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4715. }
  4716. template <typename batch_rule_t, batch_rule_t batch_rule>
  4717. ::std::vector<at::Tensor> gradient_scalarint_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  4718. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4719. auto maybe_layer = maybeCurrentDynamicLayer();
  4720. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4721. int64_t cur_level = maybe_layer->layerId();
  4722. if (!isBatchedAtLevel(self, cur_level)) {
  4723. return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
  4724. }
  4725. Tensor self_value;
  4726. optional<int64_t> self_bdim;
  4727. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4728. auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  4729. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4730. }
  4731. template <typename batch_rule_t, batch_rule_t batch_rule>
  4732. ::std::vector<at::Tensor> gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
  4733. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4734. auto maybe_layer = maybeCurrentDynamicLayer();
  4735. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4736. int64_t cur_level = maybe_layer->layerId();
  4737. if (!isBatchedAtLevel(self, cur_level)) {
  4738. return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
  4739. }
  4740. Tensor self_value;
  4741. optional<int64_t> self_bdim;
  4742. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4743. auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  4744. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4745. }
  4746. template <typename batch_rule_t, batch_rule_t batch_rule>
  4747. ::std::vector<at::Tensor> gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
  4748. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4749. auto maybe_layer = maybeCurrentDynamicLayer();
  4750. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4751. int64_t cur_level = maybe_layer->layerId();
  4752. if (!isBatchedAtLevel(self, cur_level)) {
  4753. return at::_ops::gradient_array::call(self, dim, edge_order);
  4754. }
  4755. Tensor self_value;
  4756. optional<int64_t> self_bdim;
  4757. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4758. auto results = batch_rule(self_value, self_bdim, dim, edge_order);
  4759. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4760. }
  4761. template <typename batch_rule_t, batch_rule_t batch_rule>
  4762. ::std::vector<at::Tensor> gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  4763. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4764. auto maybe_layer = maybeCurrentDynamicLayer();
  4765. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4766. int64_t cur_level = maybe_layer->layerId();
  4767. if (!isBatchedAtLevel(self, cur_level)) {
  4768. return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
  4769. }
  4770. Tensor self_value;
  4771. optional<int64_t> self_bdim;
  4772. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4773. auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  4774. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4775. }
  4776. template <typename batch_rule_t, batch_rule_t batch_rule>
  4777. ::std::vector<at::Tensor> gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
  4778. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4779. auto maybe_layer = maybeCurrentDynamicLayer();
  4780. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4781. int64_t cur_level = maybe_layer->layerId();
  4782. if (!isBatchedAtLevel(self, cur_level)) {
  4783. return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
  4784. }
  4785. Tensor self_value;
  4786. optional<int64_t> self_bdim;
  4787. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4788. auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  4789. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4790. }
  4791. template <typename batch_rule_t, batch_rule_t batch_rule>
  4792. ::std::vector<at::Tensor> gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
  4793. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4794. auto maybe_layer = maybeCurrentDynamicLayer();
  4795. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4796. int64_t cur_level = maybe_layer->layerId();
  4797. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
  4798. return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
  4799. }
  4800. Tensor self_value;
  4801. optional<int64_t> self_bdim;
  4802. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4803. auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  4804. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4805. }
  4806. template <typename batch_rule_t, batch_rule_t batch_rule>
  4807. ::std::vector<at::Tensor> gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
  4808. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4809. auto maybe_layer = maybeCurrentDynamicLayer();
  4810. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4811. int64_t cur_level = maybe_layer->layerId();
  4812. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
  4813. return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
  4814. }
  4815. Tensor self_value;
  4816. optional<int64_t> self_bdim;
  4817. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4818. auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  4819. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  4820. }
  4821. template <typename batch_rule_t, batch_rule_t batch_rule>
  4822. at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  4823. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4824. auto maybe_layer = maybeCurrentDynamicLayer();
  4825. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4826. int64_t cur_level = maybe_layer->layerId();
  4827. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  4828. return at::_ops::div_Tensor::call(self, other);
  4829. }
  4830. Tensor self_value;
  4831. optional<int64_t> self_bdim;
  4832. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4833. Tensor other_value;
  4834. optional<int64_t> other_bdim;
  4835. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  4836. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  4837. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4838. }
  4839. template <typename batch_rule_t, batch_rule_t batch_rule>
  4840. at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  4841. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4842. auto maybe_layer = maybeCurrentDynamicLayer();
  4843. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4844. int64_t cur_level = maybe_layer->layerId();
  4845. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  4846. return at::_ops::div__Tensor::call(self, other);
  4847. }
  4848. Tensor self_value;
  4849. optional<int64_t> self_bdim;
  4850. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4851. Tensor other_value;
  4852. optional<int64_t> other_bdim;
  4853. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  4854. batch_rule(self_value, self_bdim, other_value, other_bdim);
  4855. return self;
  4856. }
  4857. template <typename batch_rule_t, batch_rule_t batch_rule>
  4858. at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  4859. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4860. auto maybe_layer = maybeCurrentDynamicLayer();
  4861. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4862. int64_t cur_level = maybe_layer->layerId();
  4863. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  4864. return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
  4865. }
  4866. Tensor self_value;
  4867. optional<int64_t> self_bdim;
  4868. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4869. Tensor other_value;
  4870. optional<int64_t> other_bdim;
  4871. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  4872. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  4873. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4874. }
  4875. template <typename batch_rule_t, batch_rule_t batch_rule>
  4876. at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  4877. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4878. auto maybe_layer = maybeCurrentDynamicLayer();
  4879. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4880. int64_t cur_level = maybe_layer->layerId();
  4881. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  4882. return at::_ops::div__Tensor_mode::call(self, other, rounding_mode);
  4883. }
  4884. Tensor self_value;
  4885. optional<int64_t> self_bdim;
  4886. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4887. Tensor other_value;
  4888. optional<int64_t> other_bdim;
  4889. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  4890. batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  4891. return self;
  4892. }
  4893. template <typename batch_rule_t, batch_rule_t batch_rule>
  4894. at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  4895. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4896. auto maybe_layer = maybeCurrentDynamicLayer();
  4897. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4898. int64_t cur_level = maybe_layer->layerId();
  4899. if (!isBatchedAtLevel(self, cur_level)) {
  4900. return at::_ops::div_Scalar::call(self, other);
  4901. }
  4902. Tensor self_value;
  4903. optional<int64_t> self_bdim;
  4904. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4905. auto results = batch_rule(self_value, self_bdim, other);
  4906. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4907. }
  4908. template <typename batch_rule_t, batch_rule_t batch_rule>
  4909. at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  4910. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4911. auto maybe_layer = maybeCurrentDynamicLayer();
  4912. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4913. int64_t cur_level = maybe_layer->layerId();
  4914. if (!isBatchedAtLevel(self, cur_level)) {
  4915. return at::_ops::div__Scalar::call(self, other);
  4916. }
  4917. Tensor self_value;
  4918. optional<int64_t> self_bdim;
  4919. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4920. batch_rule(self_value, self_bdim, other);
  4921. return self;
  4922. }
  4923. template <typename batch_rule_t, batch_rule_t batch_rule>
  4924. at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  4925. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4926. auto maybe_layer = maybeCurrentDynamicLayer();
  4927. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4928. int64_t cur_level = maybe_layer->layerId();
  4929. if (!isBatchedAtLevel(self, cur_level)) {
  4930. return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
  4931. }
  4932. Tensor self_value;
  4933. optional<int64_t> self_bdim;
  4934. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4935. auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
  4936. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4937. }
  4938. template <typename batch_rule_t, batch_rule_t batch_rule>
  4939. at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  4940. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4941. auto maybe_layer = maybeCurrentDynamicLayer();
  4942. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4943. int64_t cur_level = maybe_layer->layerId();
  4944. if (!isBatchedAtLevel(self, cur_level)) {
  4945. return at::_ops::div__Scalar_mode::call(self, other, rounding_mode);
  4946. }
  4947. Tensor self_value;
  4948. optional<int64_t> self_bdim;
  4949. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4950. batch_rule(self_value, self_bdim, other, rounding_mode);
  4951. return self;
  4952. }
  4953. template <typename batch_rule_t, batch_rule_t batch_rule>
  4954. at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  4955. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4956. auto maybe_layer = maybeCurrentDynamicLayer();
  4957. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4958. int64_t cur_level = maybe_layer->layerId();
  4959. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  4960. return at::_ops::divide_Tensor::call(self, other);
  4961. }
  4962. Tensor self_value;
  4963. optional<int64_t> self_bdim;
  4964. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4965. Tensor other_value;
  4966. optional<int64_t> other_bdim;
  4967. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  4968. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  4969. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  4970. }
  4971. template <typename batch_rule_t, batch_rule_t batch_rule>
  4972. at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  4973. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4974. auto maybe_layer = maybeCurrentDynamicLayer();
  4975. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  4976. int64_t cur_level = maybe_layer->layerId();
  4977. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  4978. return at::_ops::divide__Tensor::call(self, other);
  4979. }
  4980. Tensor self_value;
  4981. optional<int64_t> self_bdim;
  4982. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  4983. Tensor other_value;
  4984. optional<int64_t> other_bdim;
  4985. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  4986. batch_rule(self_value, self_bdim, other_value, other_bdim);
  4987. return self;
  4988. }
  4989. template <typename batch_rule_t, batch_rule_t batch_rule>
  4990. at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  4991. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  4992. auto maybe_layer = maybeCurrentDynamicLayer();
  4993. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  4994. int64_t cur_level = maybe_layer->layerId();
  4995. if (!isBatchedAtLevel(self, cur_level)) {
  4996. return at::_ops::divide_Scalar::call(self, other);
  4997. }
  4998. Tensor self_value;
  4999. optional<int64_t> self_bdim;
  5000. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5001. auto results = batch_rule(self_value, self_bdim, other);
  5002. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5003. }
  5004. template <typename batch_rule_t, batch_rule_t batch_rule>
  5005. at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  5006. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5007. auto maybe_layer = maybeCurrentDynamicLayer();
  5008. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  5009. int64_t cur_level = maybe_layer->layerId();
  5010. if (!isBatchedAtLevel(self, cur_level)) {
  5011. return at::_ops::divide__Scalar::call(self, other);
  5012. }
  5013. Tensor self_value;
  5014. optional<int64_t> self_bdim;
  5015. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5016. batch_rule(self_value, self_bdim, other);
  5017. return self;
  5018. }
  5019. template <typename batch_rule_t, batch_rule_t batch_rule>
  5020. at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  5021. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5022. auto maybe_layer = maybeCurrentDynamicLayer();
  5023. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5024. int64_t cur_level = maybe_layer->layerId();
  5025. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  5026. return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
  5027. }
  5028. Tensor self_value;
  5029. optional<int64_t> self_bdim;
  5030. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5031. Tensor other_value;
  5032. optional<int64_t> other_bdim;
  5033. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  5034. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  5035. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5036. }
  5037. template <typename batch_rule_t, batch_rule_t batch_rule>
  5038. at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  5039. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5040. auto maybe_layer = maybeCurrentDynamicLayer();
  5041. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  5042. int64_t cur_level = maybe_layer->layerId();
  5043. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  5044. return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode);
  5045. }
  5046. Tensor self_value;
  5047. optional<int64_t> self_bdim;
  5048. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5049. Tensor other_value;
  5050. optional<int64_t> other_bdim;
  5051. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  5052. batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  5053. return self;
  5054. }
  5055. template <typename batch_rule_t, batch_rule_t batch_rule>
  5056. at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  5057. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5058. auto maybe_layer = maybeCurrentDynamicLayer();
  5059. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5060. int64_t cur_level = maybe_layer->layerId();
  5061. if (!isBatchedAtLevel(self, cur_level)) {
  5062. return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
  5063. }
  5064. Tensor self_value;
  5065. optional<int64_t> self_bdim;
  5066. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5067. auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
  5068. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5069. }
  5070. template <typename batch_rule_t, batch_rule_t batch_rule>
  5071. at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  5072. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5073. auto maybe_layer = maybeCurrentDynamicLayer();
  5074. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  5075. int64_t cur_level = maybe_layer->layerId();
  5076. if (!isBatchedAtLevel(self, cur_level)) {
  5077. return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode);
  5078. }
  5079. Tensor self_value;
  5080. optional<int64_t> self_bdim;
  5081. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5082. batch_rule(self_value, self_bdim, other, rounding_mode);
  5083. return self;
  5084. }
  5085. template <typename batch_rule_t, batch_rule_t batch_rule>
  5086. at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  5087. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5088. auto maybe_layer = maybeCurrentDynamicLayer();
  5089. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5090. int64_t cur_level = maybe_layer->layerId();
  5091. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  5092. return at::_ops::true_divide_Tensor::call(self, other);
  5093. }
  5094. Tensor self_value;
  5095. optional<int64_t> self_bdim;
  5096. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5097. Tensor other_value;
  5098. optional<int64_t> other_bdim;
  5099. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  5100. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  5101. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5102. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::true_divide__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::true_divide_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::true_divide__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) {
    return at::_ops::dot::call(self, tensor);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor tensor_value;
  optional<int64_t> tensor_bdim;
  std::tie(tensor_value, tensor_bdim) = unwrapTensorAtLevel(tensor, cur_level);
  auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::vdot::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
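// NOTE [TensorList vmap plumbing]
// Ops that take an at::TensorList (einsum, row_stack, ...) pass the list through
// to the batch rule unmodified; the per-tensor unwrapping happens inside the
// batch rule itself rather than in this generated wrapper.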
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::einsum::call(equation, tensors, path);
  }
  auto results = batch_rule(equation, tensors, path);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
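// NOTE [optional tensors and multi-output vmap plumbing]
// Optional Tensor arguments (e.g. per_sample_weights below) are unwrapped only
// when they are present; otherwise an empty optional value/bdim pair is passed
// to the batch rule. Batch rules for multi-output ops return an interleaved
// tuple (value0, bdim0, value1, bdim1, ...), which the wrapper re-packs into a
// tuple of batched Tensors, one makeBatched call per output.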
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor row_stack_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::row_stack::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  Tensor bag_size_value;
  optional<int64_t> bag_size_bdim;
  std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
  Tensor maximum_indices_value;
  optional<int64_t> maximum_indices_bdim;
  std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  Tensor bag_size_value;
  optional<int64_t> bag_size_bdim;
  std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  Tensor bag_size_value;
  optional<int64_t> bag_size_bdim;
  std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
  Tensor maximum_indices_value;
  optional<int64_t> maximum_indices_bdim;
  std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) {
    return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
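// NOTE [factory-style vmap plumbing]
// Factory-like methods such as new_empty / new_full / new_zeros only unwrap
// `self`; the TensorOptions components (dtype, layout, device, pin_memory) are
// forwarded to the batch rule untouched.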
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_full_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor scales_value;
  optional<int64_t> scales_bdim;
  std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
  Tensor zero_points_value;
  optional<int64_t> zero_points_bdim;
  std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_resize_output_::call(self, size, device);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, size, device);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(qtensor, cur_level)) {
    return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor qtensor_value;
  optional<int64_t> qtensor_bdim;
  std::tie(qtensor_value, qtensor_bdim) = unwrapTensorAtLevel(qtensor, cur_level);
  auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor empty_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor erf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erf::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & erf__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erf_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor erfc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erfc::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & erfc__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erfc_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor exp_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & exp__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor exp2_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp2::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & exp2__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp2_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor expm1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::expm1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & expm1__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::expm1_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
  5837. template <typename batch_rule_t, batch_rule_t batch_rule>
  5838. at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
  5839. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5840. auto maybe_layer = maybeCurrentDynamicLayer();
  5841. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5842. int64_t cur_level = maybe_layer->layerId();
  5843. if (!isBatchedAtLevel(self, cur_level)) {
  5844. return at::_ops::expand::call(self, size, implicit);
  5845. }
  5846. Tensor self_value;
  5847. optional<int64_t> self_bdim;
  5848. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5849. auto results = batch_rule(self_value, self_bdim, size, implicit);
  5850. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5851. }
  5852. template <typename batch_rule_t, batch_rule_t batch_rule>
  5853. at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  5854. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5855. auto maybe_layer = maybeCurrentDynamicLayer();
  5856. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5857. int64_t cur_level = maybe_layer->layerId();
  5858. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  5859. return at::_ops::expand_as::call(self, other);
  5860. }
  5861. Tensor self_value;
  5862. optional<int64_t> self_bdim;
  5863. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5864. Tensor other_value;
  5865. optional<int64_t> other_bdim;
  5866. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  5867. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  5868. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5869. }
  5870. template <typename batch_rule_t, batch_rule_t batch_rule>
  5871. at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
  5872. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5873. auto maybe_layer = maybeCurrentDynamicLayer();
  5874. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5875. int64_t cur_level = maybe_layer->layerId();
  5876. if (!isBatchedAtLevel(self, cur_level)) {
  5877. return at::_ops::flatten_using_ints::call(self, start_dim, end_dim);
  5878. }
  5879. Tensor self_value;
  5880. optional<int64_t> self_bdim;
  5881. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5882. auto results = batch_rule(self_value, self_bdim, start_dim, end_dim);
  5883. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5884. }
  5885. template <typename batch_rule_t, batch_rule_t batch_rule>
  5886. at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
  5887. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5888. auto maybe_layer = maybeCurrentDynamicLayer();
  5889. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5890. int64_t cur_level = maybe_layer->layerId();
  5891. if (!isBatchedAtLevel(self, cur_level)) {
  5892. return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim);
  5893. }
  5894. Tensor self_value;
  5895. optional<int64_t> self_bdim;
  5896. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5897. auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
  5898. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5899. }
  5900. template <typename batch_rule_t, batch_rule_t batch_rule>
  5901. at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
  5902. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5903. auto maybe_layer = maybeCurrentDynamicLayer();
  5904. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5905. int64_t cur_level = maybe_layer->layerId();
  5906. if (!isBatchedAtLevel(self, cur_level)) {
  5907. return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim);
  5908. }
  5909. Tensor self_value;
  5910. optional<int64_t> self_bdim;
  5911. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5912. auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
  5913. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5914. }
  5915. template <typename batch_rule_t, batch_rule_t batch_rule>
  5916. at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
  5917. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5918. auto maybe_layer = maybeCurrentDynamicLayer();
  5919. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5920. int64_t cur_level = maybe_layer->layerId();
  5921. if (!isBatchedAtLevel(self, cur_level)) {
  5922. return at::_ops::flatten_DimnameList::call(self, dims, out_dim);
  5923. }
  5924. Tensor self_value;
  5925. optional<int64_t> self_bdim;
  5926. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5927. auto results = batch_rule(self_value, self_bdim, dims, out_dim);
  5928. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5929. }
  5930. template <typename batch_rule_t, batch_rule_t batch_rule>
  5931. at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
  5932. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5933. auto maybe_layer = maybeCurrentDynamicLayer();
  5934. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5935. int64_t cur_level = maybe_layer->layerId();
  5936. if (!isBatchedAtLevel(self, cur_level)) {
  5937. return at::_ops::unflatten_int::call(self, dim, sizes);
  5938. }
  5939. Tensor self_value;
  5940. optional<int64_t> self_bdim;
  5941. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5942. auto results = batch_rule(self_value, self_bdim, dim, sizes);
  5943. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5944. }
  5945. template <typename batch_rule_t, batch_rule_t batch_rule>
  5946. at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
  5947. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5948. auto maybe_layer = maybeCurrentDynamicLayer();
  5949. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5950. int64_t cur_level = maybe_layer->layerId();
  5951. if (!isBatchedAtLevel(self, cur_level)) {
  5952. return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
  5953. }
  5954. Tensor self_value;
  5955. optional<int64_t> self_bdim;
  5956. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5957. auto results = batch_rule(self_value, self_bdim, dim, sizes, names);
  5958. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5959. }
  5960. template <typename batch_rule_t, batch_rule_t batch_rule>
  5961. at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) {
  5962. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5963. auto maybe_layer = maybeCurrentDynamicLayer();
  5964. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5965. int64_t cur_level = maybe_layer->layerId();
  5966. if (!isBatchedAtLevel(self, cur_level)) {
  5967. return at::_ops::fill_Scalar::call(self, value);
  5968. }
  5969. Tensor self_value;
  5970. optional<int64_t> self_bdim;
  5971. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5972. auto results = batch_rule(self_value, self_bdim, value);
  5973. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5974. }
  5975. template <typename batch_rule_t, batch_rule_t batch_rule>
  5976. at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) {
  5977. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5978. auto maybe_layer = maybeCurrentDynamicLayer();
  5979. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  5980. int64_t cur_level = maybe_layer->layerId();
  5981. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
  5982. return at::_ops::fill_Tensor::call(self, value);
  5983. }
  5984. Tensor self_value;
  5985. optional<int64_t> self_bdim;
  5986. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  5987. Tensor value_value;
  5988. optional<int64_t> value_bdim;
  5989. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  5990. auto results = batch_rule(self_value, self_bdim, value_value, value_bdim);
  5991. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  5992. }
  5993. template <typename batch_rule_t, batch_rule_t batch_rule>
  5994. at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) {
  5995. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  5996. auto maybe_layer = maybeCurrentDynamicLayer();
  5997. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  5998. int64_t cur_level = maybe_layer->layerId();
  5999. if (!isBatchedAtLevel(self, cur_level)) {
  6000. return at::_ops::fill__Scalar::call(self, value);
  6001. }
  6002. Tensor self_value;
  6003. optional<int64_t> self_bdim;
  6004. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6005. batch_rule(self_value, self_bdim, value);
  6006. return self;
  6007. }
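// In-place variants ("gen_vmap_inplace_plumbing") differ only in how the
// result is returned: the batch rule is invoked for its side effect on the
// unwrapped self_value, and the wrapper returns the original `self`. Since the
// batched wrapper of `self` holds that same underlying tensor, the in-place
// mutation is visible through it and no makeBatched re-wrap is needed.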
  6008. template <typename batch_rule_t, batch_rule_t batch_rule>
  6009. at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) {
  6010. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6011. auto maybe_layer = maybeCurrentDynamicLayer();
  6012. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6013. int64_t cur_level = maybe_layer->layerId();
  6014. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
  6015. return at::_ops::fill__Tensor::call(self, value);
  6016. }
  6017. Tensor self_value;
  6018. optional<int64_t> self_bdim;
  6019. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6020. Tensor value_value;
  6021. optional<int64_t> value_bdim;
  6022. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  6023. batch_rule(self_value, self_bdim, value_value, value_bdim);
  6024. return self;
  6025. }
  6026. template <typename batch_rule_t, batch_rule_t batch_rule>
  6027. at::Tensor floor_generated_plumbing(const at::Tensor & self) {
  6028. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6029. auto maybe_layer = maybeCurrentDynamicLayer();
  6030. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6031. int64_t cur_level = maybe_layer->layerId();
  6032. if (!isBatchedAtLevel(self, cur_level)) {
  6033. return at::_ops::floor::call(self);
  6034. }
  6035. Tensor self_value;
  6036. optional<int64_t> self_bdim;
  6037. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6038. auto results = batch_rule(self_value, self_bdim);
  6039. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6040. }
  6041. template <typename batch_rule_t, batch_rule_t batch_rule>
  6042. at::Tensor & floor__generated_plumbing(at::Tensor & self) {
  6043. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6044. auto maybe_layer = maybeCurrentDynamicLayer();
  6045. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6046. int64_t cur_level = maybe_layer->layerId();
  6047. if (!isBatchedAtLevel(self, cur_level)) {
  6048. return at::_ops::floor_::call(self);
  6049. }
  6050. Tensor self_value;
  6051. optional<int64_t> self_bdim;
  6052. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6053. batch_rule(self_value, self_bdim);
  6054. return self;
  6055. }
  6056. template <typename batch_rule_t, batch_rule_t batch_rule>
  6057. at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  6058. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6059. auto maybe_layer = maybeCurrentDynamicLayer();
  6060. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6061. int64_t cur_level = maybe_layer->layerId();
  6062. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6063. return at::_ops::floor_divide::call(self, other);
  6064. }
  6065. Tensor self_value;
  6066. optional<int64_t> self_bdim;
  6067. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6068. Tensor other_value;
  6069. optional<int64_t> other_bdim;
  6070. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6071. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  6072. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6073. }
  6074. template <typename batch_rule_t, batch_rule_t batch_rule>
  6075. at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  6076. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6077. auto maybe_layer = maybeCurrentDynamicLayer();
  6078. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6079. int64_t cur_level = maybe_layer->layerId();
  6080. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6081. return at::_ops::floor_divide__Tensor::call(self, other);
  6082. }
  6083. Tensor self_value;
  6084. optional<int64_t> self_bdim;
  6085. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6086. Tensor other_value;
  6087. optional<int64_t> other_bdim;
  6088. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6089. batch_rule(self_value, self_bdim, other_value, other_bdim);
  6090. return self;
  6091. }
  6092. template <typename batch_rule_t, batch_rule_t batch_rule>
  6093. at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  6094. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6095. auto maybe_layer = maybeCurrentDynamicLayer();
  6096. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6097. int64_t cur_level = maybe_layer->layerId();
  6098. if (!isBatchedAtLevel(self, cur_level)) {
  6099. return at::_ops::floor_divide_Scalar::call(self, other);
  6100. }
  6101. Tensor self_value;
  6102. optional<int64_t> self_bdim;
  6103. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6104. auto results = batch_rule(self_value, self_bdim, other);
  6105. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6106. }
  6107. template <typename batch_rule_t, batch_rule_t batch_rule>
  6108. at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  6109. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6110. auto maybe_layer = maybeCurrentDynamicLayer();
  6111. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6112. int64_t cur_level = maybe_layer->layerId();
  6113. if (!isBatchedAtLevel(self, cur_level)) {
  6114. return at::_ops::floor_divide__Scalar::call(self, other);
  6115. }
  6116. Tensor self_value;
  6117. optional<int64_t> self_bdim;
  6118. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6119. batch_rule(self_value, self_bdim, other);
  6120. return self;
  6121. }
  6122. template <typename batch_rule_t, batch_rule_t batch_rule>
  6123. at::Tensor frac_generated_plumbing(const at::Tensor & self) {
  6124. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6125. auto maybe_layer = maybeCurrentDynamicLayer();
  6126. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6127. int64_t cur_level = maybe_layer->layerId();
  6128. if (!isBatchedAtLevel(self, cur_level)) {
  6129. return at::_ops::frac::call(self);
  6130. }
  6131. Tensor self_value;
  6132. optional<int64_t> self_bdim;
  6133. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6134. auto results = batch_rule(self_value, self_bdim);
  6135. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6136. }
  6137. template <typename batch_rule_t, batch_rule_t batch_rule>
  6138. at::Tensor & frac__generated_plumbing(at::Tensor & self) {
  6139. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6140. auto maybe_layer = maybeCurrentDynamicLayer();
  6141. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6142. int64_t cur_level = maybe_layer->layerId();
  6143. if (!isBatchedAtLevel(self, cur_level)) {
  6144. return at::_ops::frac_::call(self);
  6145. }
  6146. Tensor self_value;
  6147. optional<int64_t> self_bdim;
  6148. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6149. batch_rule(self_value, self_bdim);
  6150. return self;
  6151. }
  6152. template <typename batch_rule_t, batch_rule_t batch_rule>
  6153. at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  6154. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6155. auto maybe_layer = maybeCurrentDynamicLayer();
  6156. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6157. int64_t cur_level = maybe_layer->layerId();
  6158. if (!isBatchedAtLevel(self, cur_level)) {
  6159. return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
  6160. }
  6161. Tensor self_value;
  6162. optional<int64_t> self_bdim;
  6163. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6164. auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format);
  6165. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6166. }
  6167. template <typename batch_rule_t, batch_rule_t batch_rule>
  6168. at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  6169. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6170. auto maybe_layer = maybeCurrentDynamicLayer();
  6171. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6172. int64_t cur_level = maybe_layer->layerId();
  6173. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6174. return at::_ops::gcd::call(self, other);
  6175. }
  6176. Tensor self_value;
  6177. optional<int64_t> self_bdim;
  6178. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6179. Tensor other_value;
  6180. optional<int64_t> other_bdim;
  6181. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6182. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  6183. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6184. }
  6185. template <typename batch_rule_t, batch_rule_t batch_rule>
  6186. at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  6187. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6188. auto maybe_layer = maybeCurrentDynamicLayer();
  6189. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6190. int64_t cur_level = maybe_layer->layerId();
  6191. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6192. return at::_ops::gcd_::call(self, other);
  6193. }
  6194. Tensor self_value;
  6195. optional<int64_t> self_bdim;
  6196. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6197. Tensor other_value;
  6198. optional<int64_t> other_bdim;
  6199. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6200. batch_rule(self_value, self_bdim, other_value, other_bdim);
  6201. return self;
  6202. }
  6203. template <typename batch_rule_t, batch_rule_t batch_rule>
  6204. at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  6205. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6206. auto maybe_layer = maybeCurrentDynamicLayer();
  6207. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6208. int64_t cur_level = maybe_layer->layerId();
  6209. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6210. return at::_ops::lcm::call(self, other);
  6211. }
  6212. Tensor self_value;
  6213. optional<int64_t> self_bdim;
  6214. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6215. Tensor other_value;
  6216. optional<int64_t> other_bdim;
  6217. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6218. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  6219. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6220. }
  6221. template <typename batch_rule_t, batch_rule_t batch_rule>
  6222. at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  6223. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6224. auto maybe_layer = maybeCurrentDynamicLayer();
  6225. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6226. int64_t cur_level = maybe_layer->layerId();
  6227. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6228. return at::_ops::lcm_::call(self, other);
  6229. }
  6230. Tensor self_value;
  6231. optional<int64_t> self_bdim;
  6232. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6233. Tensor other_value;
  6234. optional<int64_t> other_bdim;
  6235. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6236. batch_rule(self_value, self_bdim, other_value, other_bdim);
  6237. return self;
  6238. }
  6239. template <typename batch_rule_t, batch_rule_t batch_rule>
  6240. at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  6241. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6242. auto maybe_layer = maybeCurrentDynamicLayer();
  6243. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6244. int64_t cur_level = maybe_layer->layerId();
  6245. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6246. return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
  6247. }
  6248. Tensor input_value;
  6249. optional<int64_t> input_bdim;
  6250. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6251. Tensor grid_value;
  6252. optional<int64_t> grid_bdim;
  6253. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6254. auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
  6255. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6256. }
  6257. template <typename batch_rule_t, batch_rule_t batch_rule>
  6258. at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  6259. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6260. auto maybe_layer = maybeCurrentDynamicLayer();
  6261. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6262. int64_t cur_level = maybe_layer->layerId();
  6263. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6264. return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners);
  6265. }
  6266. Tensor input_value;
  6267. optional<int64_t> input_bdim;
  6268. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6269. Tensor grid_value;
  6270. optional<int64_t> grid_bdim;
  6271. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6272. auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
  6273. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6274. }
  6275. template <typename batch_rule_t, batch_rule_t batch_rule>
  6276. ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
  6277. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6278. auto maybe_layer = maybeCurrentDynamicLayer();
  6279. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6280. int64_t cur_level = maybe_layer->layerId();
  6281. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6282. return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
  6283. }
  6284. Tensor grad_output_value;
  6285. optional<int64_t> grad_output_bdim;
  6286. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  6287. Tensor input_value;
  6288. optional<int64_t> input_bdim;
  6289. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6290. Tensor grid_value;
  6291. optional<int64_t> grid_bdim;
  6292. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6293. auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
  6294. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  6295. }
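// Ops with multiple tensor outputs (such as the backward above) use the same
// pattern, except the batch rule returns an interleaved tuple
// (out0, bdim0, out1, bdim1, ...); the plumbing re-wraps each pair with
// makeBatched and packs the results into the op's return tuple.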
  6296. template <typename batch_rule_t, batch_rule_t batch_rule>
  6297. at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  6298. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6299. auto maybe_layer = maybeCurrentDynamicLayer();
  6300. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6301. int64_t cur_level = maybe_layer->layerId();
  6302. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6303. return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
  6304. }
  6305. Tensor input_value;
  6306. optional<int64_t> input_bdim;
  6307. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6308. Tensor grid_value;
  6309. optional<int64_t> grid_bdim;
  6310. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6311. auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
  6312. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6313. }
  6314. template <typename batch_rule_t, batch_rule_t batch_rule>
  6315. ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  6316. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6317. auto maybe_layer = maybeCurrentDynamicLayer();
  6318. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6319. int64_t cur_level = maybe_layer->layerId();
  6320. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6321. return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
  6322. }
  6323. Tensor grad_output_value;
  6324. optional<int64_t> grad_output_bdim;
  6325. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  6326. Tensor input_value;
  6327. optional<int64_t> input_bdim;
  6328. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6329. Tensor grid_value;
  6330. optional<int64_t> grid_bdim;
  6331. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6332. auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
  6333. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  6334. }
  6335. template <typename batch_rule_t, batch_rule_t batch_rule>
  6336. at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
  6337. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6338. auto maybe_layer = maybeCurrentDynamicLayer();
  6339. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6340. int64_t cur_level = maybe_layer->layerId();
  6341. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6342. return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners);
  6343. }
  6344. Tensor input_value;
  6345. optional<int64_t> input_bdim;
  6346. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6347. Tensor grid_value;
  6348. optional<int64_t> grid_bdim;
  6349. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6350. auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
  6351. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6352. }
  6353. template <typename batch_rule_t, batch_rule_t batch_rule>
  6354. ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
  6355. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6356. auto maybe_layer = maybeCurrentDynamicLayer();
  6357. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6358. int64_t cur_level = maybe_layer->layerId();
  6359. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
  6360. return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
  6361. }
  6362. Tensor grad_output_value;
  6363. optional<int64_t> grad_output_bdim;
  6364. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  6365. Tensor input_value;
  6366. optional<int64_t> input_bdim;
  6367. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6368. Tensor grid_value;
  6369. optional<int64_t> grid_bdim;
  6370. std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
  6371. auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
  6372. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  6373. }
  6374. template <typename batch_rule_t, batch_rule_t batch_rule>
  6375. at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
  6376. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6377. auto maybe_layer = maybeCurrentDynamicLayer();
  6378. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6379. int64_t cur_level = maybe_layer->layerId();
  6380. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
  6381. return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction);
  6382. }
  6383. Tensor self_value;
  6384. optional<int64_t> self_bdim;
  6385. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6386. Tensor target_value;
  6387. optional<int64_t> target_bdim;
  6388. std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  6389. auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction);
  6390. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6391. }
  6392. template <typename batch_rule_t, batch_rule_t batch_rule>
  6393. at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
  6394. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6395. auto maybe_layer = maybeCurrentDynamicLayer();
  6396. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6397. int64_t cur_level = maybe_layer->layerId();
  6398. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  6399. return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled);
  6400. }
  6401. Tensor input_value;
  6402. optional<int64_t> input_bdim;
  6403. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6404. optional<Tensor> weight_value;
  6405. optional<int64_t> weight_bdim;
  6406. if (weight) {
  6407. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  6408. }
  6409. optional<Tensor> bias_value;
  6410. optional<int64_t> bias_bdim;
  6411. if (bias) {
  6412. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  6413. }
  6414. auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled);
  6415. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6416. }
  6417. template <typename batch_rule_t, batch_rule_t batch_rule>
  6418. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
  6419. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6420. auto maybe_layer = maybeCurrentDynamicLayer();
  6421. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6422. int64_t cur_level = maybe_layer->layerId();
  6423. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  6424. return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
  6425. }
  6426. Tensor input_value;
  6427. optional<int64_t> input_bdim;
  6428. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6429. optional<Tensor> weight_value;
  6430. optional<int64_t> weight_bdim;
  6431. if (weight) {
  6432. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  6433. }
  6434. optional<Tensor> bias_value;
  6435. optional<int64_t> bias_bdim;
  6436. if (bias) {
  6437. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  6438. }
  6439. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps);
  6440. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  6441. }
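// Optional tensor arguments (weight/bias above) are unwrapped only when they
// are present; otherwise the batch rule receives empty optionals for both the
// value and its batch dimension.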
  6442. template <typename batch_rule_t, batch_rule_t batch_rule>
  6443. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
  6444. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6445. auto maybe_layer = maybeCurrentDynamicLayer();
  6446. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6447. int64_t cur_level = maybe_layer->layerId();
  6448. if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  6449. return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
  6450. }
  6451. Tensor grad_out_value;
  6452. optional<int64_t> grad_out_bdim;
  6453. std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  6454. Tensor input_value;
  6455. optional<int64_t> input_bdim;
  6456. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6457. Tensor mean_value;
  6458. optional<int64_t> mean_bdim;
  6459. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  6460. Tensor rstd_value;
  6461. optional<int64_t> rstd_bdim;
  6462. std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
  6463. optional<Tensor> weight_value;
  6464. optional<int64_t> weight_bdim;
  6465. if (weight) {
  6466. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  6467. }
  6468. auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask);
  6469. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  6470. }
  6471. template <typename batch_rule_t, batch_rule_t batch_rule>
  6472. at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
  6473. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6474. auto maybe_layer = maybeCurrentDynamicLayer();
  6475. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6476. int64_t cur_level = maybe_layer->layerId();
  6477. if (!isBatchedAtLevel(self, cur_level)) {
  6478. return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
  6479. }
  6480. Tensor self_value;
  6481. optional<int64_t> self_bdim;
  6482. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6483. auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided);
  6484. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6485. }
  6486. template <typename batch_rule_t, batch_rule_t batch_rule>
  6487. at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
  6488. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6489. auto maybe_layer = maybeCurrentDynamicLayer();
  6490. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6491. int64_t cur_level = maybe_layer->layerId();
  6492. if (!isBatchedAtLevel(self, cur_level)) {
  6493. return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size);
  6494. }
  6495. Tensor self_value;
  6496. optional<int64_t> self_bdim;
  6497. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6498. auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size);
  6499. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6500. }
  6501. template <typename batch_rule_t, batch_rule_t batch_rule>
  6502. at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
  6503. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6504. auto maybe_layer = maybeCurrentDynamicLayer();
  6505. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6506. int64_t cur_level = maybe_layer->layerId();
  6507. if (!isBatchedAtLevel(self, cur_level)) {
  6508. return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
  6509. }
  6510. Tensor self_value;
  6511. optional<int64_t> self_bdim;
  6512. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6513. auto results = batch_rule(self_value, self_bdim, dim, normalization, forward);
  6514. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6515. }
  6516. template <typename batch_rule_t, batch_rule_t batch_rule>
  6517. void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
  6518. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6519. auto maybe_layer = maybeCurrentDynamicLayer();
  6520. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  6521. int64_t cur_level = maybe_layer->layerId();
  6522. if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) {
  6523. return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
  6524. }
  6525. Tensor compressed_idx_value;
  6526. optional<int64_t> compressed_idx_bdim;
  6527. std::tie(compressed_idx_value, compressed_idx_bdim) = unwrapTensorAtLevel(compressed_idx, cur_level);
  6528. Tensor plain_idx_value;
  6529. optional<int64_t> plain_idx_bdim;
  6530. std::tie(plain_idx_value, plain_idx_bdim) = unwrapTensorAtLevel(plain_idx, cur_level);
  6531. batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz);
  6532. }
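// Void-returning ops ("gen_vmap_plumbing_no_returns") simply forward the
// unwrapped arguments to the batch rule for its side effects; there is no
// result to re-wrap.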
  6533. template <typename batch_rule_t, batch_rule_t batch_rule>
  6534. at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
  6535. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6536. auto maybe_layer = maybeCurrentDynamicLayer();
  6537. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6538. int64_t cur_level = maybe_layer->layerId();
  6539. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
  6540. return at::_ops::index_Tensor::call(self, indices);
  6541. }
  6542. Tensor self_value;
  6543. optional<int64_t> self_bdim;
  6544. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6545. auto results = batch_rule(self_value, self_bdim, indices);
  6546. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6547. }
  6548. template <typename batch_rule_t, batch_rule_t batch_rule>
  6549. at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
  6550. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6551. auto maybe_layer = maybeCurrentDynamicLayer();
  6552. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6553. int64_t cur_level = maybe_layer->layerId();
  6554. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  6555. return at::_ops::index_copy_::call(self, dim, index, source);
  6556. }
  6557. Tensor self_value;
  6558. optional<int64_t> self_bdim;
  6559. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6560. Tensor index_value;
  6561. optional<int64_t> index_bdim;
  6562. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  6563. Tensor source_value;
  6564. optional<int64_t> source_bdim;
  6565. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  6566. batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
  6567. return self;
  6568. }
  6569. template <typename batch_rule_t, batch_rule_t batch_rule>
  6570. at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
  6571. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6572. auto maybe_layer = maybeCurrentDynamicLayer();
  6573. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6574. int64_t cur_level = maybe_layer->layerId();
  6575. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  6576. return at::_ops::index_copy::call(self, dim, index, source);
  6577. }
  6578. Tensor self_value;
  6579. optional<int64_t> self_bdim;
  6580. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6581. Tensor index_value;
  6582. optional<int64_t> index_bdim;
  6583. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  6584. Tensor source_value;
  6585. optional<int64_t> source_bdim;
  6586. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  6587. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
  6588. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6589. }
  6590. template <typename batch_rule_t, batch_rule_t batch_rule>
  6591. at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
  6592. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6593. auto maybe_layer = maybeCurrentDynamicLayer();
  6594. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6595. int64_t cur_level = maybe_layer->layerId();
  6596. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  6597. return at::_ops::index_copy__dimname::call(self, dim, index, source);
  6598. }
  6599. Tensor self_value;
  6600. optional<int64_t> self_bdim;
  6601. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6602. Tensor index_value;
  6603. optional<int64_t> index_bdim;
  6604. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  6605. Tensor source_value;
  6606. optional<int64_t> source_bdim;
  6607. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  6608. batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
  6609. return self;
  6610. }
  6611. template <typename batch_rule_t, batch_rule_t batch_rule>
  6612. at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
  6613. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6614. auto maybe_layer = maybeCurrentDynamicLayer();
  6615. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6616. int64_t cur_level = maybe_layer->layerId();
  6617. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  6618. return at::_ops::index_copy_dimname::call(self, dim, index, source);
  6619. }
  6620. Tensor self_value;
  6621. optional<int64_t> self_bdim;
  6622. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6623. Tensor index_value;
  6624. optional<int64_t> index_bdim;
  6625. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  6626. Tensor source_value;
  6627. optional<int64_t> source_bdim;
  6628. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  6629. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
  6630. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6631. }
  6632. template <typename batch_rule_t, batch_rule_t batch_rule>
  6633. at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
  6634. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6635. auto maybe_layer = maybeCurrentDynamicLayer();
  6636. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6637. int64_t cur_level = maybe_layer->layerId();
  6638. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
  6639. return at::_ops::index_put_::call(self, indices, values, accumulate);
  6640. }
  6641. Tensor self_value;
  6642. optional<int64_t> self_bdim;
  6643. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6644. Tensor values_value;
  6645. optional<int64_t> values_bdim;
  6646. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  6647. batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
  6648. return self;
  6649. }
  6650. template <typename batch_rule_t, batch_rule_t batch_rule>
  6651. at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
  6652. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6653. auto maybe_layer = maybeCurrentDynamicLayer();
  6654. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6655. int64_t cur_level = maybe_layer->layerId();
  6656. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
  6657. return at::_ops::index_put::call(self, indices, values, accumulate);
  6658. }
  6659. Tensor self_value;
  6660. optional<int64_t> self_bdim;
  6661. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6662. Tensor values_value;
  6663. optional<int64_t> values_bdim;
  6664. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  6665. auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
  6666. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6667. }
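// Arguments of type c10::List<c10::optional<at::Tensor>> (the `indices` above)
// are checked with isBatchedAtLevel but forwarded to the batch rule still
// wrapped; unwrapping the individual list elements is left to the batch rule
// itself.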
  6668. template <typename batch_rule_t, batch_rule_t batch_rule>
  6669. at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
  6670. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6671. auto maybe_layer = maybeCurrentDynamicLayer();
  6672. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  6673. int64_t cur_level = maybe_layer->layerId();
  6674. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
  6675. return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
  6676. }
  6677. Tensor self_value;
  6678. optional<int64_t> self_bdim;
  6679. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6680. Tensor values_value;
  6681. optional<int64_t> values_bdim;
  6682. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  6683. batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
  6684. return self;
  6685. }
  6686. template <typename batch_rule_t, batch_rule_t batch_rule>
  6687. at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
  6688. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6689. auto maybe_layer = maybeCurrentDynamicLayer();
  6690. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6691. int64_t cur_level = maybe_layer->layerId();
  6692. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
  6693. return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
  6694. }
  6695. Tensor input_value;
  6696. optional<int64_t> input_bdim;
  6697. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  6698. optional<Tensor> weight_value;
  6699. optional<int64_t> weight_bdim;
  6700. if (weight) {
  6701. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  6702. }
  6703. optional<Tensor> bias_value;
  6704. optional<int64_t> bias_bdim;
  6705. if (bias) {
  6706. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  6707. }
  6708. optional<Tensor> running_mean_value;
  6709. optional<int64_t> running_mean_bdim;
  6710. if (running_mean) {
  6711. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  6712. }
  6713. optional<Tensor> running_var_value;
  6714. optional<int64_t> running_var_bdim;
  6715. if (running_var) {
  6716. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  6717. }
  6718. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled);
  6719. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6720. }
  6721. template <typename batch_rule_t, batch_rule_t batch_rule>
  6722. at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
  6723. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6724. auto maybe_layer = maybeCurrentDynamicLayer();
  6725. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6726. int64_t cur_level = maybe_layer->layerId();
  6727. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6728. return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
  6729. }
  6730. Tensor self_value;
  6731. optional<int64_t> self_bdim;
  6732. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6733. Tensor other_value;
  6734. optional<int64_t> other_bdim;
  6735. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6736. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan);
  6737. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6738. }
  6739. template <typename batch_rule_t, batch_rule_t batch_rule>
  6740. at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
  6741. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6742. auto maybe_layer = maybeCurrentDynamicLayer();
  6743. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6744. int64_t cur_level = maybe_layer->layerId();
  6745. if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) {
  6746. return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert);
  6747. }
  6748. Tensor elements_value;
  6749. optional<int64_t> elements_bdim;
  6750. std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
  6751. Tensor test_elements_value;
  6752. optional<int64_t> test_elements_bdim;
  6753. std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
  6754. auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert);
  6755. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6756. }
  6757. template <typename batch_rule_t, batch_rule_t batch_rule>
  6758. at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
  6759. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6760. auto maybe_layer = maybeCurrentDynamicLayer();
  6761. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6762. int64_t cur_level = maybe_layer->layerId();
  6763. if (!isBatchedAtLevel(elements, cur_level)) {
  6764. return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert);
  6765. }
  6766. Tensor elements_value;
  6767. optional<int64_t> elements_bdim;
  6768. std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
  6769. auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert);
  6770. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6771. }
  6772. template <typename batch_rule_t, batch_rule_t batch_rule>
  6773. at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
  6774. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6775. auto maybe_layer = maybeCurrentDynamicLayer();
  6776. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6777. int64_t cur_level = maybe_layer->layerId();
  6778. if (!isBatchedAtLevel(test_elements, cur_level)) {
  6779. return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert);
  6780. }
  6781. Tensor test_elements_value;
  6782. optional<int64_t> test_elements_bdim;
  6783. std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
  6784. auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert);
  6785. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6786. }
  6787. template <typename batch_rule_t, batch_rule_t batch_rule>
  6788. at::Tensor isnan_generated_plumbing(const at::Tensor & self) {
  6789. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6790. auto maybe_layer = maybeCurrentDynamicLayer();
  6791. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6792. int64_t cur_level = maybe_layer->layerId();
  6793. if (!isBatchedAtLevel(self, cur_level)) {
  6794. return at::_ops::isnan::call(self);
  6795. }
  6796. Tensor self_value;
  6797. optional<int64_t> self_bdim;
  6798. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6799. auto results = batch_rule(self_value, self_bdim);
  6800. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6801. }
  6802. template <typename batch_rule_t, batch_rule_t batch_rule>
  6803. at::Tensor isreal_generated_plumbing(const at::Tensor & self) {
  6804. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6805. auto maybe_layer = maybeCurrentDynamicLayer();
  6806. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6807. int64_t cur_level = maybe_layer->layerId();
  6808. if (!isBatchedAtLevel(self, cur_level)) {
  6809. return at::_ops::isreal::call(self);
  6810. }
  6811. Tensor self_value;
  6812. optional<int64_t> self_bdim;
  6813. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6814. auto results = batch_rule(self_value, self_bdim);
  6815. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6816. }
  6817. template <typename batch_rule_t, batch_rule_t batch_rule>
  6818. at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
  6819. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6820. auto maybe_layer = maybeCurrentDynamicLayer();
  6821. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6822. int64_t cur_level = maybe_layer->layerId();
  6823. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
  6824. return at::_ops::kl_div::call(self, target, reduction, log_target);
  6825. }
  6826. Tensor self_value;
  6827. optional<int64_t> self_bdim;
  6828. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6829. Tensor target_value;
  6830. optional<int64_t> target_bdim;
  6831. std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  6832. auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target);
  6833. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6834. }
  6835. template <typename batch_rule_t, batch_rule_t batch_rule>
  6836. at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  6837. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6838. auto maybe_layer = maybeCurrentDynamicLayer();
  6839. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6840. int64_t cur_level = maybe_layer->layerId();
  6841. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  6842. return at::_ops::kron::call(self, other);
  6843. }
  6844. Tensor self_value;
  6845. optional<int64_t> self_bdim;
  6846. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6847. Tensor other_value;
  6848. optional<int64_t> other_bdim;
  6849. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  6850. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  6851. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  6852. }
  6853. template <typename batch_rule_t, batch_rule_t batch_rule>
  6854. ::std::tuple<at::Tensor,at::Tensor> kthvalue_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  6855. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6856. auto maybe_layer = maybeCurrentDynamicLayer();
  6857. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6858. int64_t cur_level = maybe_layer->layerId();
  6859. if (!isBatchedAtLevel(self, cur_level)) {
  6860. return at::_ops::kthvalue::call(self, k, dim, keepdim);
  6861. }
  6862. Tensor self_value;
  6863. optional<int64_t> self_bdim;
  6864. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6865. auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
  6866. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  6867. }
  6868. template <typename batch_rule_t, batch_rule_t batch_rule>
  6869. ::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname_generated_plumbing(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
  6870. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  6871. auto maybe_layer = maybeCurrentDynamicLayer();
  6872. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  6873. int64_t cur_level = maybe_layer->layerId();
  6874. if (!isBatchedAtLevel(self, cur_level)) {
  6875. return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
  6876. }
  6877. Tensor self_value;
  6878. optional<int64_t> self_bdim;
  6879. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  6880. auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
  6881. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  6882. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor rstd_value;
  optional<int64_t> rstd_bdim;
  std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nan_to_num::call(self, nan, posinf, neginf);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
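// In-place variants use the "gen_vmap_inplace_plumbing" pattern: the batch
// rule mutates the unwrapped self_value directly, so the wrapper ignores the
// rule's result and returns the original self reference.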
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nan_to_num_::call(self, nan, posinf, neginf);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, nan, posinf, neginf);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::linear::call(input, weight, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::linear_backward::call(self, grad_output, weight, output_mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::mkldnn_linear::call(self, weight, bias);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor packed_value;
  optional<int64_t> packed_bdim;
  std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
  Tensor col_offsets_value;
  optional<int64_t> col_offsets_bdim;
  std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor packed_value;
  optional<int64_t> packed_bdim;
  std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
  Tensor col_offsets_value;
  optional<int64_t> col_offsets_bdim;
  std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor packed_weight_value;
  optional<int64_t> packed_weight_bdim;
  std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor packed_weight_value;
  optional<int64_t> packed_weight_bdim;
  std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::fbgemm_pack_quantized_matrix::call(input);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, K, N);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ldexp_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ldexp_::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log10_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log10::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log10__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log10_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log1p_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log1p::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log1p__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log1p_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log2_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log2::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log2__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log2_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logaddexp::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logaddexp2::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::xlogy_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::xlogy_Scalar_Self::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::xlogy_Scalar_Other::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::xlogy__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::xlogy__Scalar_Other::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_softmax_int::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_softmax_Dimname::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_log_softmax::call(self, dim, half_to_float);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_logcumsumexp::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logcumsumexp::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logcumsumexp_dimname::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logsumexp::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logsumexp_names::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction);
  }
  Tensor input1_value;
  optional<int64_t> input1_bdim;
  std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
  Tensor input2_value;
  optional<int64_t> input2_bdim;
  std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::matmul::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> matmul_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::matmul_backward::call(grad, self, other, mask);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::matrix_power::call(self, n);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::matrix_exp::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::matrix_exp_backward::call(self, grad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _aminmax_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_aminmax::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_aminmax_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> aminmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::aminmax::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) {
    return at::_ops::_compute_linear_combination::call(input, coefficients);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor coefficients_value;
  optional<int64_t> coefficients_bdim;
  std::tie(coefficients_value, coefficients_bdim) = unwrapTensorAtLevel(coefficients, cur_level);
  auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::max_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::max_names_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  7875. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  7876. }
  7877. template <typename batch_rule_t, batch_rule_t batch_rule>
  7878. at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
  7879. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7880. auto maybe_layer = maybeCurrentDynamicLayer();
  7881. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7882. int64_t cur_level = maybe_layer->layerId();
  7883. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
  7884. return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
  7885. }
  7886. Tensor grad_value;
  7887. optional<int64_t> grad_bdim;
  7888. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  7889. Tensor indices_value;
  7890. optional<int64_t> indices_bdim;
  7891. std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  7892. auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim);
  7893. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  7894. }
  7895. template <typename batch_rule_t, batch_rule_t batch_rule>
  7896. at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  7897. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7898. auto maybe_layer = maybeCurrentDynamicLayer();
  7899. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7900. int64_t cur_level = maybe_layer->layerId();
  7901. if (!isBatchedAtLevel(self, cur_level)) {
  7902. return at::_ops::amax::call(self, dim, keepdim);
  7903. }
  7904. Tensor self_value;
  7905. optional<int64_t> self_bdim;
  7906. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  7907. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  7908. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  7909. }
  7910. template <typename batch_rule_t, batch_rule_t batch_rule>
  7911. ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  7912. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7913. auto maybe_layer = maybeCurrentDynamicLayer();
  7914. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7915. int64_t cur_level = maybe_layer->layerId();
  7916. if (!isBatchedAtLevel(self, cur_level)) {
  7917. return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  7918. }
  7919. Tensor self_value;
  7920. optional<int64_t> self_bdim;
  7921. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  7922. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  7923. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  7924. }
  7925. template <typename batch_rule_t, batch_rule_t batch_rule>
  7926. at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  7927. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7928. auto maybe_layer = maybeCurrentDynamicLayer();
  7929. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7930. int64_t cur_level = maybe_layer->layerId();
  7931. if (!isBatchedAtLevel(self, cur_level)) {
  7932. return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  7933. }
  7934. Tensor self_value;
  7935. optional<int64_t> self_bdim;
  7936. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  7937. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  7938. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  7939. }
  7940. template <typename batch_rule_t, batch_rule_t batch_rule>
  7941. at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  7942. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7943. auto maybe_layer = maybeCurrentDynamicLayer();
  7944. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7945. int64_t cur_level = maybe_layer->layerId();
  7946. if (!isBatchedAtLevel(self, cur_level)) {
  7947. return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  7948. }
  7949. Tensor self_value;
  7950. optional<int64_t> self_bdim;
  7951. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  7952. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  7953. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  7954. }
  7955. template <typename batch_rule_t, batch_rule_t batch_rule>
  7956. at::Tensor max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  7957. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7958. auto maybe_layer = maybeCurrentDynamicLayer();
  7959. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7960. int64_t cur_level = maybe_layer->layerId();
  7961. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  7962. return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
  7963. }
  7964. Tensor grad_output_value;
  7965. optional<int64_t> grad_output_bdim;
  7966. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  7967. Tensor self_value;
  7968. optional<int64_t> self_bdim;
  7969. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  7970. auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  7971. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  7972. }
  7973. template <typename batch_rule_t, batch_rule_t batch_rule>
  7974. at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  7975. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7976. auto maybe_layer = maybeCurrentDynamicLayer();
  7977. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7978. int64_t cur_level = maybe_layer->layerId();
  7979. if (!isBatchedAtLevel(self, cur_level)) {
  7980. return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  7981. }
  7982. Tensor self_value;
  7983. optional<int64_t> self_bdim;
  7984. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  7985. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  7986. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  7987. }
  7988. template <typename batch_rule_t, batch_rule_t batch_rule>
  7989. at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  7990. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  7991. auto maybe_layer = maybeCurrentDynamicLayer();
  7992. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  7993. int64_t cur_level = maybe_layer->layerId();
  7994. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
  7995. return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
  7996. }
  7997. Tensor grad_output_value;
  7998. optional<int64_t> grad_output_bdim;
  7999. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  8000. Tensor output_value;
  8001. optional<int64_t> output_bdim;
  8002. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  8003. Tensor input_value;
  8004. optional<int64_t> input_bdim;
  8005. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8006. auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  8007. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8008. }
  8009. template <typename batch_rule_t, batch_rule_t batch_rule>
  8010. at::Tensor mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  8011. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8012. auto maybe_layer = maybeCurrentDynamicLayer();
  8013. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8014. int64_t cur_level = maybe_layer->layerId();
  8015. if (!isBatchedAtLevel(self, cur_level)) {
  8016. return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  8017. }
  8018. Tensor self_value;
  8019. optional<int64_t> self_bdim;
  8020. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8021. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  8022. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8023. }
  8024. template <typename batch_rule_t, batch_rule_t batch_rule>
  8025. at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  8026. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8027. auto maybe_layer = maybeCurrentDynamicLayer();
  8028. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8029. int64_t cur_level = maybe_layer->layerId();
  8030. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
  8031. return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
  8032. }
  8033. Tensor grad_output_value;
  8034. optional<int64_t> grad_output_bdim;
  8035. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  8036. Tensor output_value;
  8037. optional<int64_t> output_bdim;
  8038. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  8039. Tensor input_value;
  8040. optional<int64_t> input_bdim;
  8041. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8042. auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  8043. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8044. }
  8045. template <typename batch_rule_t, batch_rule_t batch_rule>
  8046. at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  8047. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8048. auto maybe_layer = maybeCurrentDynamicLayer();
  8049. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8050. int64_t cur_level = maybe_layer->layerId();
  8051. if (!isBatchedAtLevel(self, cur_level)) {
  8052. return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  8053. }
  8054. Tensor self_value;
  8055. optional<int64_t> self_bdim;
  8056. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8057. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  8058. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8059. }
  8060. template <typename batch_rule_t, batch_rule_t batch_rule>
  8061. at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  8062. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8063. auto maybe_layer = maybeCurrentDynamicLayer();
  8064. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8065. int64_t cur_level = maybe_layer->layerId();
  8066. if (!isBatchedAtLevel(self, cur_level)) {
  8067. return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  8068. }
  8069. Tensor self_value;
  8070. optional<int64_t> self_bdim;
  8071. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8072. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  8073. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8074. }
  8075. template <typename batch_rule_t, batch_rule_t batch_rule>
  8076. at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  8077. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8078. auto maybe_layer = maybeCurrentDynamicLayer();
  8079. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8080. int64_t cur_level = maybe_layer->layerId();
  8081. if (!isBatchedAtLevel(self, cur_level)) {
  8082. return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  8083. }
  8084. Tensor self_value;
  8085. optional<int64_t> self_bdim;
  8086. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8087. auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  8088. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8089. }
  8090. template <typename batch_rule_t, batch_rule_t batch_rule>
  8091. at::Tensor mean_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  8092. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8093. auto maybe_layer = maybeCurrentDynamicLayer();
  8094. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8095. int64_t cur_level = maybe_layer->layerId();
  8096. if (!isBatchedAtLevel(self, cur_level)) {
  8097. return at::_ops::mean::call(self, dtype);
  8098. }
  8099. Tensor self_value;
  8100. optional<int64_t> self_bdim;
  8101. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8102. auto results = batch_rule(self_value, self_bdim, dtype);
  8103. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8104. }
  8105. template <typename batch_rule_t, batch_rule_t batch_rule>
  8106. at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  8107. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8108. auto maybe_layer = maybeCurrentDynamicLayer();
  8109. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8110. int64_t cur_level = maybe_layer->layerId();
  8111. if (!isBatchedAtLevel(self, cur_level)) {
  8112. return at::_ops::mean_dim::call(self, dim, keepdim, dtype);
  8113. }
  8114. Tensor self_value;
  8115. optional<int64_t> self_bdim;
  8116. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8117. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  8118. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8119. }
  8120. template <typename batch_rule_t, batch_rule_t batch_rule>
  8121. at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  8122. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8123. auto maybe_layer = maybeCurrentDynamicLayer();
  8124. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8125. int64_t cur_level = maybe_layer->layerId();
  8126. if (!isBatchedAtLevel(self, cur_level)) {
  8127. return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype);
  8128. }
  8129. Tensor self_value;
  8130. optional<int64_t> self_bdim;
  8131. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8132. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  8133. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8134. }
  8135. template <typename batch_rule_t, batch_rule_t batch_rule>
  8136. at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  8137. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8138. auto maybe_layer = maybeCurrentDynamicLayer();
  8139. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8140. int64_t cur_level = maybe_layer->layerId();
  8141. if (!isBatchedAtLevel(self, cur_level)) {
  8142. return at::_ops::nanmean::call(self, dim, keepdim, dtype);
  8143. }
  8144. Tensor self_value;
  8145. optional<int64_t> self_bdim;
  8146. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8147. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  8148. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8149. }
  8150. template <typename batch_rule_t, batch_rule_t batch_rule>
  8151. at::Tensor median_generated_plumbing(const at::Tensor & self) {
  8152. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8153. auto maybe_layer = maybeCurrentDynamicLayer();
  8154. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8155. int64_t cur_level = maybe_layer->layerId();
  8156. if (!isBatchedAtLevel(self, cur_level)) {
  8157. return at::_ops::median::call(self);
  8158. }
  8159. Tensor self_value;
  8160. optional<int64_t> self_bdim;
  8161. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8162. auto results = batch_rule(self_value, self_bdim);
  8163. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8164. }
  8165. template <typename batch_rule_t, batch_rule_t batch_rule>
  8166. ::std::tuple<at::Tensor,at::Tensor> median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  8167. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8168. auto maybe_layer = maybeCurrentDynamicLayer();
  8169. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8170. int64_t cur_level = maybe_layer->layerId();
  8171. if (!isBatchedAtLevel(self, cur_level)) {
  8172. return at::_ops::median_dim::call(self, dim, keepdim);
  8173. }
  8174. Tensor self_value;
  8175. optional<int64_t> self_bdim;
  8176. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8177. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8178. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8179. }
  8180. template <typename batch_rule_t, batch_rule_t batch_rule>
  8181. ::std::tuple<at::Tensor,at::Tensor> median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  8182. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8183. auto maybe_layer = maybeCurrentDynamicLayer();
  8184. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8185. int64_t cur_level = maybe_layer->layerId();
  8186. if (!isBatchedAtLevel(self, cur_level)) {
  8187. return at::_ops::median_names_dim::call(self, dim, keepdim);
  8188. }
  8189. Tensor self_value;
  8190. optional<int64_t> self_bdim;
  8191. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8192. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8193. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8194. }
  8195. template <typename batch_rule_t, batch_rule_t batch_rule>
  8196. at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) {
  8197. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8198. auto maybe_layer = maybeCurrentDynamicLayer();
  8199. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8200. int64_t cur_level = maybe_layer->layerId();
  8201. if (!isBatchedAtLevel(self, cur_level)) {
  8202. return at::_ops::nanmedian::call(self);
  8203. }
  8204. Tensor self_value;
  8205. optional<int64_t> self_bdim;
  8206. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8207. auto results = batch_rule(self_value, self_bdim);
  8208. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8209. }
  8210. template <typename batch_rule_t, batch_rule_t batch_rule>
  8211. ::std::tuple<at::Tensor,at::Tensor> nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  8212. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8213. auto maybe_layer = maybeCurrentDynamicLayer();
  8214. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8215. int64_t cur_level = maybe_layer->layerId();
  8216. if (!isBatchedAtLevel(self, cur_level)) {
  8217. return at::_ops::nanmedian_dim::call(self, dim, keepdim);
  8218. }
  8219. Tensor self_value;
  8220. optional<int64_t> self_bdim;
  8221. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8222. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8223. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8224. }
  8225. template <typename batch_rule_t, batch_rule_t batch_rule>
  8226. ::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  8227. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8228. auto maybe_layer = maybeCurrentDynamicLayer();
  8229. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8230. int64_t cur_level = maybe_layer->layerId();
  8231. if (!isBatchedAtLevel(self, cur_level)) {
  8232. return at::_ops::nanmedian_names_dim::call(self, dim, keepdim);
  8233. }
  8234. Tensor self_value;
  8235. optional<int64_t> self_bdim;
  8236. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8237. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8238. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8239. }
  8240. template <typename batch_rule_t, batch_rule_t batch_rule>
  8241. ::std::tuple<at::Tensor,at::Tensor> min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  8242. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8243. auto maybe_layer = maybeCurrentDynamicLayer();
  8244. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8245. int64_t cur_level = maybe_layer->layerId();
  8246. if (!isBatchedAtLevel(self, cur_level)) {
  8247. return at::_ops::min_dim::call(self, dim, keepdim);
  8248. }
  8249. Tensor self_value;
  8250. optional<int64_t> self_bdim;
  8251. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8252. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8253. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8254. }
  8255. template <typename batch_rule_t, batch_rule_t batch_rule>
  8256. ::std::tuple<at::Tensor,at::Tensor> min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  8257. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8258. auto maybe_layer = maybeCurrentDynamicLayer();
  8259. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8260. int64_t cur_level = maybe_layer->layerId();
  8261. if (!isBatchedAtLevel(self, cur_level)) {
  8262. return at::_ops::min_names_dim::call(self, dim, keepdim);
  8263. }
  8264. Tensor self_value;
  8265. optional<int64_t> self_bdim;
  8266. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8267. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8268. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8269. }
  8270. template <typename batch_rule_t, batch_rule_t batch_rule>
  8271. at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  8272. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8273. auto maybe_layer = maybeCurrentDynamicLayer();
  8274. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8275. int64_t cur_level = maybe_layer->layerId();
  8276. if (!isBatchedAtLevel(self, cur_level)) {
  8277. return at::_ops::amin::call(self, dim, keepdim);
  8278. }
  8279. Tensor self_value;
  8280. optional<int64_t> self_bdim;
  8281. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8282. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8283. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8284. }
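// For operators with optional tensor arguments (e.g. a convolution bias), the
// wrappers only unwrap the argument when it is present; otherwise the batch
// rule receives nullopt for both the value and its batch dimension.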
  8285. template <typename batch_rule_t, batch_rule_t batch_rule>
  8286. at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  8287. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8288. auto maybe_layer = maybeCurrentDynamicLayer();
  8289. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8290. int64_t cur_level = maybe_layer->layerId();
  8291. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8292. return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
  8293. }
  8294. Tensor self_value;
  8295. optional<int64_t> self_bdim;
  8296. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8297. Tensor weight_value;
  8298. optional<int64_t> weight_bdim;
  8299. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8300. optional<Tensor> bias_value;
  8301. optional<int64_t> bias_bdim;
  8302. if (bias) {
  8303. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8304. }
  8305. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
  8306. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8307. }
  8308. template <typename batch_rule_t, batch_rule_t batch_rule>
  8309. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
  8310. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8311. auto maybe_layer = maybeCurrentDynamicLayer();
  8312. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8313. int64_t cur_level = maybe_layer->layerId();
  8314. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  8315. return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
  8316. }
  8317. Tensor self_value;
  8318. optional<int64_t> self_bdim;
  8319. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8320. Tensor grad_output_value;
  8321. optional<int64_t> grad_output_bdim;
  8322. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  8323. Tensor weight_value;
  8324. optional<int64_t> weight_bdim;
  8325. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8326. auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask);
  8327. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  8328. }
  8329. template <typename batch_rule_t, batch_rule_t batch_rule>
  8330. at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  8331. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8332. auto maybe_layer = maybeCurrentDynamicLayer();
  8333. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8334. int64_t cur_level = maybe_layer->layerId();
  8335. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8336. return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
  8337. }
  8338. Tensor self_value;
  8339. optional<int64_t> self_bdim;
  8340. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8341. Tensor weight_value;
  8342. optional<int64_t> weight_bdim;
  8343. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8344. optional<Tensor> bias_value;
  8345. optional<int64_t> bias_bdim;
  8346. if (bias) {
  8347. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8348. }
  8349. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
  8350. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8351. }
  8352. template <typename batch_rule_t, batch_rule_t batch_rule>
  8353. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
  8354. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8355. auto maybe_layer = maybeCurrentDynamicLayer();
  8356. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8357. int64_t cur_level = maybe_layer->layerId();
  8358. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) {
  8359. return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
  8360. }
  8361. Tensor input_value;
  8362. optional<int64_t> input_bdim;
  8363. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8364. Tensor weight0_value;
  8365. optional<int64_t> weight0_bdim;
  8366. std::tie(weight0_value, weight0_bdim) = unwrapTensorAtLevel(weight0, cur_level);
  8367. Tensor weight1_value;
  8368. optional<int64_t> weight1_bdim;
  8369. std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
  8370. Tensor weight2_value;
  8371. optional<int64_t> weight2_bdim;
  8372. std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
  8373. Tensor weight3_value;
  8374. optional<int64_t> weight3_bdim;
  8375. std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
  8376. Tensor hx__value;
  8377. optional<int64_t> hx__bdim;
  8378. std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
  8379. Tensor cx__value;
  8380. optional<int64_t> cx__bdim;
  8381. std::tie(cx__value, cx__bdim) = unwrapTensorAtLevel(cx_, cur_level);
  8382. auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
  8383. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
  8384. }
  8385. template <typename batch_rule_t, batch_rule_t batch_rule>
  8386. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
  8387. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8388. auto maybe_layer = maybeCurrentDynamicLayer();
  8389. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8390. int64_t cur_level = maybe_layer->layerId();
  8391. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
  8392. return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
  8393. }
  8394. Tensor input_value;
  8395. optional<int64_t> input_bdim;
  8396. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8397. Tensor weight1_value;
  8398. optional<int64_t> weight1_bdim;
  8399. std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
  8400. Tensor weight2_value;
  8401. optional<int64_t> weight2_bdim;
  8402. std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
  8403. Tensor weight3_value;
  8404. optional<int64_t> weight3_bdim;
  8405. std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
  8406. Tensor weight4_value;
  8407. optional<int64_t> weight4_bdim;
  8408. std::tie(weight4_value, weight4_bdim) = unwrapTensorAtLevel(weight4, cur_level);
  8409. Tensor hx__value;
  8410. optional<int64_t> hx__bdim;
  8411. std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
  8412. Tensor cx_tmp_value;
  8413. optional<int64_t> cx_tmp_bdim;
  8414. std::tie(cx_tmp_value, cx_tmp_bdim) = unwrapTensorAtLevel(cx_tmp, cur_level);
  8415. Tensor output_value;
  8416. optional<int64_t> output_bdim;
  8417. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  8418. Tensor hy__value;
  8419. optional<int64_t> hy__bdim;
  8420. std::tie(hy__value, hy__bdim) = unwrapTensorAtLevel(hy_, cur_level);
  8421. Tensor cy__value;
  8422. optional<int64_t> cy__bdim;
  8423. std::tie(cy__value, cy__bdim) = unwrapTensorAtLevel(cy_, cur_level);
  8424. Tensor workspace_value;
  8425. optional<int64_t> workspace_bdim;
  8426. std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
  8427. optional<Tensor> grad_output_value;
  8428. optional<int64_t> grad_output_bdim;
  8429. if (grad_output) {
  8430. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  8431. }
  8432. optional<Tensor> grad_hy_value;
  8433. optional<int64_t> grad_hy_bdim;
  8434. if (grad_hy) {
  8435. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  8436. }
  8437. optional<Tensor> grad_cy_value;
  8438. optional<int64_t> grad_cy_bdim;
  8439. if (grad_cy) {
  8440. std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  8441. }
  8442. auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim);
  8443. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level));
  8444. }
  8445. template <typename batch_rule_t, batch_rule_t batch_rule>
  8446. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
  8447. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8448. auto maybe_layer = maybeCurrentDynamicLayer();
  8449. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8450. int64_t cur_level = maybe_layer->layerId();
  8451. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
  8452. return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
  8453. }
  8454. Tensor input_value;
  8455. optional<int64_t> input_bdim;
  8456. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8457. Tensor weight_value;
  8458. optional<int64_t> weight_bdim;
  8459. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8460. optional<Tensor> bias_value;
  8461. optional<int64_t> bias_bdim;
  8462. if (bias) {
  8463. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8464. }
  8465. optional<Tensor> running_mean_value;
  8466. optional<int64_t> running_mean_bdim;
  8467. if (running_mean) {
  8468. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  8469. }
  8470. optional<Tensor> running_var_value;
  8471. optional<int64_t> running_var_bdim;
  8472. if (running_var) {
  8473. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  8474. }
  8475. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
  8476. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  8477. }
  8478. template <typename batch_rule_t, batch_rule_t batch_rule>
  8479. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
  8480. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8481. auto maybe_layer = maybeCurrentDynamicLayer();
  8482. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8483. int64_t cur_level = maybe_layer->layerId();
  8484. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) {
  8485. return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
  8486. }
  8487. Tensor input_value;
  8488. optional<int64_t> input_bdim;
  8489. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8490. Tensor grad_output_value;
  8491. optional<int64_t> grad_output_bdim;
  8492. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  8493. Tensor weight_value;
  8494. optional<int64_t> weight_bdim;
  8495. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8496. optional<Tensor> running_mean_value;
  8497. optional<int64_t> running_mean_bdim;
  8498. if (running_mean) {
  8499. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  8500. }
  8501. optional<Tensor> running_var_value;
  8502. optional<int64_t> running_var_bdim;
  8503. if (running_var) {
  8504. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  8505. }
  8506. optional<Tensor> save_mean_value;
  8507. optional<int64_t> save_mean_bdim;
  8508. if (save_mean) {
  8509. std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  8510. }
  8511. optional<Tensor> save_var_value;
  8512. optional<int64_t> save_var_bdim;
  8513. if (save_var) {
  8514. std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
  8515. }
  8516. auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon);
  8517. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  8518. }
  8519. template <typename batch_rule_t, batch_rule_t batch_rule>
  8520. at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  8521. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8522. auto maybe_layer = maybeCurrentDynamicLayer();
  8523. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8524. int64_t cur_level = maybe_layer->layerId();
  8525. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8526. return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
  8527. }
  8528. Tensor self_value;
  8529. optional<int64_t> self_bdim;
  8530. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8531. Tensor weight_value;
  8532. optional<int64_t> weight_bdim;
  8533. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8534. optional<Tensor> bias_value;
  8535. optional<int64_t> bias_bdim;
  8536. if (bias) {
  8537. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8538. }
  8539. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
  8540. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8541. }
  8542. template <typename batch_rule_t, batch_rule_t batch_rule>
  8543. at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  8544. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8545. auto maybe_layer = maybeCurrentDynamicLayer();
  8546. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8547. int64_t cur_level = maybe_layer->layerId();
  8548. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8549. return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
  8550. }
  8551. Tensor self_value;
  8552. optional<int64_t> self_bdim;
  8553. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8554. Tensor weight_value;
  8555. optional<int64_t> weight_bdim;
  8556. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8557. optional<Tensor> bias_value;
  8558. optional<int64_t> bias_bdim;
  8559. if (bias) {
  8560. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8561. }
  8562. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
  8563. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8564. }
  8565. template <typename batch_rule_t, batch_rule_t batch_rule>
  8566. at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  8567. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8568. auto maybe_layer = maybeCurrentDynamicLayer();
  8569. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8570. int64_t cur_level = maybe_layer->layerId();
  8571. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8572. return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
  8573. }
  8574. Tensor self_value;
  8575. optional<int64_t> self_bdim;
  8576. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8577. Tensor weight_value;
  8578. optional<int64_t> weight_bdim;
  8579. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8580. optional<Tensor> bias_value;
  8581. optional<int64_t> bias_bdim;
  8582. if (bias) {
  8583. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8584. }
  8585. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
  8586. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8587. }
  8588. template <typename batch_rule_t, batch_rule_t batch_rule>
  8589. at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  8590. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8591. auto maybe_layer = maybeCurrentDynamicLayer();
  8592. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8593. int64_t cur_level = maybe_layer->layerId();
  8594. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8595. return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
  8596. }
  8597. Tensor self_value;
  8598. optional<int64_t> self_bdim;
  8599. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8600. Tensor weight_value;
  8601. optional<int64_t> weight_bdim;
  8602. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8603. optional<Tensor> bias_value;
  8604. optional<int64_t> bias_bdim;
  8605. if (bias) {
  8606. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8607. }
  8608. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  8609. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8610. }
  8611. template <typename batch_rule_t, batch_rule_t batch_rule>
  8612. at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  8613. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8614. auto maybe_layer = maybeCurrentDynamicLayer();
  8615. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8616. int64_t cur_level = maybe_layer->layerId();
  8617. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  8618. return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
  8619. }
  8620. Tensor self_value;
  8621. optional<int64_t> self_bdim;
  8622. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8623. Tensor weight_value;
  8624. optional<int64_t> weight_bdim;
  8625. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  8626. Tensor z_value;
  8627. optional<int64_t> z_bdim;
  8628. std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
  8629. optional<Tensor> bias_value;
  8630. optional<int64_t> bias_bdim;
  8631. if (bias) {
  8632. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  8633. }
  8634. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
  8635. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8636. }
  8637. template <typename batch_rule_t, batch_rule_t batch_rule>
  8638. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
  8639. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8640. auto maybe_layer = maybeCurrentDynamicLayer();
  8641. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8642. int64_t cur_level = maybe_layer->layerId();
  8643. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
  8644. return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  8645. }
  8646. Tensor input_value;
  8647. optional<int64_t> input_bdim;
  8648. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8649. Tensor hx_value;
  8650. optional<int64_t> hx_bdim;
  8651. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  8652. optional<Tensor> cx_value;
  8653. optional<int64_t> cx_bdim;
  8654. if (cx) {
  8655. std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  8656. }
  8657. optional<Tensor> dropout_state_value;
  8658. optional<int64_t> dropout_state_bdim;
  8659. if (dropout_state) {
  8660. std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  8661. }
  8662. auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
  8663. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
  8664. }
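// For multi-output ops such as miopen_rnn above, the batch_rule is expected to return an
// interleaved tuple (tensor0, bdim0, tensor1, bdim1, ...): a five-tensor result is consumed as
// std::get<0..9>, and each pair is re-wrapped with makeBatched independently.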
  8665. template <typename batch_rule_t, batch_rule_t batch_rule>
  8666. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  8667. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8668. auto maybe_layer = maybeCurrentDynamicLayer();
  8669. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8670. int64_t cur_level = maybe_layer->layerId();
  8671. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
  8672. return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  8673. }
  8674. Tensor input_value;
  8675. optional<int64_t> input_bdim;
  8676. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  8677. Tensor weight_buf_value;
  8678. optional<int64_t> weight_buf_bdim;
  8679. std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  8680. Tensor hx_value;
  8681. optional<int64_t> hx_bdim;
  8682. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  8683. Tensor output_value;
  8684. optional<int64_t> output_bdim;
  8685. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  8686. Tensor reserve_value;
  8687. optional<int64_t> reserve_bdim;
  8688. std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  8689. optional<Tensor> cx_value;
  8690. optional<int64_t> cx_bdim;
  8691. if (cx) {
  8692. std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  8693. }
  8694. optional<Tensor> grad_output_value;
  8695. optional<int64_t> grad_output_bdim;
  8696. if (grad_output) {
  8697. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  8698. }
  8699. optional<Tensor> grad_hy_value;
  8700. optional<int64_t> grad_hy_bdim;
  8701. if (grad_hy) {
  8702. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  8703. }
  8704. optional<Tensor> grad_cy_value;
  8705. optional<int64_t> grad_cy_bdim;
  8706. if (grad_cy) {
  8707. std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  8708. }
  8709. optional<Tensor> dropout_state_value;
  8710. optional<int64_t> dropout_state_bdim;
  8711. if (dropout_state) {
  8712. std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  8713. }
  8714. auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
  8715. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
  8716. }
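// Two details of miopen_rnn_backward worth noting: the TensorList `weight` is forwarded to the
// batch_rule unmodified (lists are not unwrapped element by element here), and the
// ::std::vector<at::Tensor> output slot is re-wrapped with makeBatchedVector rather than
// makeBatched.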
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::mm::call(self, mat2);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
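// This header only declares the plumbing; the batch rules it forwards to are defined elsewhere.
// As a rough sketch (the name and exact signature below are illustrative assumptions, not
// declarations from this file), a rule compatible with mm_generated_plumbing would look like:
//
//   std::tuple<at::Tensor, c10::optional<int64_t>> mm_batch_rule(
//       const at::Tensor& self, c10::optional<int64_t> self_bdim,
//       const at::Tensor& mat2, c10::optional<int64_t> mat2_bdim);
//
// i.e. one (value, bdim) pair in per Tensor argument and one (value, bdim) pair out, matching
// the std::get<0>/std::get<1> unpacking above.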
  8735. template <typename batch_rule_t, batch_rule_t batch_rule>
  8736. at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) {
  8737. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8738. auto maybe_layer = maybeCurrentDynamicLayer();
  8739. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8740. int64_t cur_level = maybe_layer->layerId();
  8741. if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
  8742. return at::_ops::_sparse_mm::call(sparse, dense);
  8743. }
  8744. Tensor sparse_value;
  8745. optional<int64_t> sparse_bdim;
  8746. std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
  8747. Tensor dense_value;
  8748. optional<int64_t> dense_bdim;
  8749. std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
  8750. auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim);
  8751. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8752. }
  8753. template <typename batch_rule_t, batch_rule_t batch_rule>
  8754. at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
  8755. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8756. auto maybe_layer = maybeCurrentDynamicLayer();
  8757. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8758. int64_t cur_level = maybe_layer->layerId();
  8759. if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
  8760. return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
  8761. }
  8762. Tensor sparse_value;
  8763. optional<int64_t> sparse_bdim;
  8764. std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
  8765. Tensor dense_value;
  8766. optional<int64_t> dense_bdim;
  8767. std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
  8768. auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce);
  8769. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8770. }
  8771. template <typename batch_rule_t, batch_rule_t batch_rule>
  8772. at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  8773. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8774. auto maybe_layer = maybeCurrentDynamicLayer();
  8775. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8776. int64_t cur_level = maybe_layer->layerId();
  8777. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  8778. return at::_ops::_sparse_sparse_matmul::call(self, other);
  8779. }
  8780. Tensor self_value;
  8781. optional<int64_t> self_bdim;
  8782. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8783. Tensor other_value;
  8784. optional<int64_t> other_bdim;
  8785. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  8786. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  8787. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8788. }
  8789. template <typename batch_rule_t, batch_rule_t batch_rule>
  8790. ::std::tuple<at::Tensor,at::Tensor> mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
  8791. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8792. auto maybe_layer = maybeCurrentDynamicLayer();
  8793. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8794. int64_t cur_level = maybe_layer->layerId();
  8795. if (!isBatchedAtLevel(self, cur_level)) {
  8796. return at::_ops::mode::call(self, dim, keepdim);
  8797. }
  8798. Tensor self_value;
  8799. optional<int64_t> self_bdim;
  8800. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8801. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8802. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8803. }
  8804. template <typename batch_rule_t, batch_rule_t batch_rule>
  8805. ::std::tuple<at::Tensor,at::Tensor> mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
  8806. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8807. auto maybe_layer = maybeCurrentDynamicLayer();
  8808. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8809. int64_t cur_level = maybe_layer->layerId();
  8810. if (!isBatchedAtLevel(self, cur_level)) {
  8811. return at::_ops::mode_dimname::call(self, dim, keepdim);
  8812. }
  8813. Tensor self_value;
  8814. optional<int64_t> self_bdim;
  8815. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8816. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  8817. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  8818. }
  8819. template <typename batch_rule_t, batch_rule_t batch_rule>
  8820. at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  8821. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8822. auto maybe_layer = maybeCurrentDynamicLayer();
  8823. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8824. int64_t cur_level = maybe_layer->layerId();
  8825. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  8826. return at::_ops::mul_Tensor::call(self, other);
  8827. }
  8828. Tensor self_value;
  8829. optional<int64_t> self_bdim;
  8830. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8831. Tensor other_value;
  8832. optional<int64_t> other_bdim;
  8833. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  8834. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  8835. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8836. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::mul__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
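// In-place variants (mul_, multiply_, mvlgamma_, ...) use the "gen_vmap_inplace_plumbing"
// escape check, invoke the batch_rule purely for its side effect on self_value, and return
// `self` instead of re-wrapping a result.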
  8855. template <typename batch_rule_t, batch_rule_t batch_rule>
  8856. at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  8857. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8858. auto maybe_layer = maybeCurrentDynamicLayer();
  8859. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8860. int64_t cur_level = maybe_layer->layerId();
  8861. if (!isBatchedAtLevel(self, cur_level)) {
  8862. return at::_ops::mul_Scalar::call(self, other);
  8863. }
  8864. Tensor self_value;
  8865. optional<int64_t> self_bdim;
  8866. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8867. auto results = batch_rule(self_value, self_bdim, other);
  8868. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8869. }
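// Non-Tensor arguments (the Scalar `other` here, and the ints/bools/doubles elsewhere) carry no
// batch dimension, so they are forwarded to the batch_rule unchanged.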
  8870. template <typename batch_rule_t, batch_rule_t batch_rule>
  8871. at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  8872. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8873. auto maybe_layer = maybeCurrentDynamicLayer();
  8874. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  8875. int64_t cur_level = maybe_layer->layerId();
  8876. if (!isBatchedAtLevel(self, cur_level)) {
  8877. return at::_ops::mul__Scalar::call(self, other);
  8878. }
  8879. Tensor self_value;
  8880. optional<int64_t> self_bdim;
  8881. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8882. batch_rule(self_value, self_bdim, other);
  8883. return self;
  8884. }
  8885. template <typename batch_rule_t, batch_rule_t batch_rule>
  8886. at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  8887. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8888. auto maybe_layer = maybeCurrentDynamicLayer();
  8889. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8890. int64_t cur_level = maybe_layer->layerId();
  8891. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  8892. return at::_ops::multiply_Tensor::call(self, other);
  8893. }
  8894. Tensor self_value;
  8895. optional<int64_t> self_bdim;
  8896. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8897. Tensor other_value;
  8898. optional<int64_t> other_bdim;
  8899. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  8900. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  8901. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8902. }
  8903. template <typename batch_rule_t, batch_rule_t batch_rule>
  8904. at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  8905. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8906. auto maybe_layer = maybeCurrentDynamicLayer();
  8907. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  8908. int64_t cur_level = maybe_layer->layerId();
  8909. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  8910. return at::_ops::multiply__Tensor::call(self, other);
  8911. }
  8912. Tensor self_value;
  8913. optional<int64_t> self_bdim;
  8914. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8915. Tensor other_value;
  8916. optional<int64_t> other_bdim;
  8917. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  8918. batch_rule(self_value, self_bdim, other_value, other_bdim);
  8919. return self;
  8920. }
  8921. template <typename batch_rule_t, batch_rule_t batch_rule>
  8922. at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  8923. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8924. auto maybe_layer = maybeCurrentDynamicLayer();
  8925. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8926. int64_t cur_level = maybe_layer->layerId();
  8927. if (!isBatchedAtLevel(self, cur_level)) {
  8928. return at::_ops::multiply_Scalar::call(self, other);
  8929. }
  8930. Tensor self_value;
  8931. optional<int64_t> self_bdim;
  8932. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8933. auto results = batch_rule(self_value, self_bdim, other);
  8934. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8935. }
  8936. template <typename batch_rule_t, batch_rule_t batch_rule>
  8937. at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  8938. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8939. auto maybe_layer = maybeCurrentDynamicLayer();
  8940. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  8941. int64_t cur_level = maybe_layer->layerId();
  8942. if (!isBatchedAtLevel(self, cur_level)) {
  8943. return at::_ops::multiply__Scalar::call(self, other);
  8944. }
  8945. Tensor self_value;
  8946. optional<int64_t> self_bdim;
  8947. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8948. batch_rule(self_value, self_bdim, other);
  8949. return self;
  8950. }
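// multiply/multiply_ are the aliases of mul/mul_, so their generated plumbing is identical to
// the mul variants above apart from the at::_ops entry they redispatch to.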
  8951. template <typename batch_rule_t, batch_rule_t batch_rule>
  8952. at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) {
  8953. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8954. auto maybe_layer = maybeCurrentDynamicLayer();
  8955. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8956. int64_t cur_level = maybe_layer->layerId();
  8957. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
  8958. return at::_ops::mv::call(self, vec);
  8959. }
  8960. Tensor self_value;
  8961. optional<int64_t> self_bdim;
  8962. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8963. Tensor vec_value;
  8964. optional<int64_t> vec_bdim;
  8965. std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
  8966. auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim);
  8967. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8968. }
  8969. template <typename batch_rule_t, batch_rule_t batch_rule>
  8970. at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) {
  8971. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8972. auto maybe_layer = maybeCurrentDynamicLayer();
  8973. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  8974. int64_t cur_level = maybe_layer->layerId();
  8975. if (!isBatchedAtLevel(self, cur_level)) {
  8976. return at::_ops::mvlgamma::call(self, p);
  8977. }
  8978. Tensor self_value;
  8979. optional<int64_t> self_bdim;
  8980. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8981. auto results = batch_rule(self_value, self_bdim, p);
  8982. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  8983. }
  8984. template <typename batch_rule_t, batch_rule_t batch_rule>
  8985. at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) {
  8986. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  8987. auto maybe_layer = maybeCurrentDynamicLayer();
  8988. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  8989. int64_t cur_level = maybe_layer->layerId();
  8990. if (!isBatchedAtLevel(self, cur_level)) {
  8991. return at::_ops::mvlgamma_::call(self, p);
  8992. }
  8993. Tensor self_value;
  8994. optional<int64_t> self_bdim;
  8995. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  8996. batch_rule(self_value, self_bdim, p);
  8997. return self;
  8998. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::narrow_copy::call(self, dim, start, length);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, start, length);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
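// Symbolic integers (the c10::SymInt start/length above) are treated like any other non-Tensor
// argument and passed straight through; only at::Tensor and c10::optional<at::Tensor> arguments
// get the unwrapTensorAtLevel treatment.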
  9014. template <typename batch_rule_t, batch_rule_t batch_rule>
  9015. at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  9016. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9017. auto maybe_layer = maybeCurrentDynamicLayer();
  9018. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9019. int64_t cur_level = maybe_layer->layerId();
  9020. if (!isBatchedAtLevel(self, cur_level)) {
  9021. return at::_ops::narrow::call(self, dim, start, length);
  9022. }
  9023. Tensor self_value;
  9024. optional<int64_t> self_bdim;
  9025. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9026. auto results = batch_rule(self_value, self_bdim, dim, start, length);
  9027. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9028. }
  9029. template <typename batch_rule_t, batch_rule_t batch_rule>
  9030. at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
  9031. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9032. auto maybe_layer = maybeCurrentDynamicLayer();
  9033. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9034. int64_t cur_level = maybe_layer->layerId();
  9035. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) {
  9036. return at::_ops::narrow_Tensor::call(self, dim, start, length);
  9037. }
  9038. Tensor self_value;
  9039. optional<int64_t> self_bdim;
  9040. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9041. Tensor start_value;
  9042. optional<int64_t> start_bdim;
  9043. std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
  9044. auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length);
  9045. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9046. }
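// narrow.Tensor shows the contrast with the overload above: once `start` is a Tensor it is
// unwrapped into (start_value, start_bdim) and participates in the batched-at-level check like
// every other tensor input.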
  9047. template <typename batch_rule_t, batch_rule_t batch_rule>
  9048. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
  9049. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9050. auto maybe_layer = maybeCurrentDynamicLayer();
  9051. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9052. int64_t cur_level = maybe_layer->layerId();
  9053. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
  9054. return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
  9055. }
  9056. Tensor input_value;
  9057. optional<int64_t> input_bdim;
  9058. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9059. optional<Tensor> weight_value;
  9060. optional<int64_t> weight_bdim;
  9061. if (weight) {
  9062. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  9063. }
  9064. optional<Tensor> bias_value;
  9065. optional<int64_t> bias_bdim;
  9066. if (bias) {
  9067. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  9068. }
  9069. optional<Tensor> running_mean_value;
  9070. optional<int64_t> running_mean_bdim;
  9071. if (running_mean) {
  9072. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  9073. }
  9074. optional<Tensor> running_var_value;
  9075. optional<int64_t> running_var_bdim;
  9076. if (running_var) {
  9077. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  9078. }
  9079. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
  9080. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  9081. }
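// Ops with several optional tensor inputs (weight, bias, running_mean, running_var) unwrap each
// one behind its own `if (arg)` guard, so an absent optional reaches the batch_rule as a pair of
// nullopts rather than as a dummy tensor.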
  9082. template <typename batch_rule_t, batch_rule_t batch_rule>
  9083. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
  9084. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9085. auto maybe_layer = maybeCurrentDynamicLayer();
  9086. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9087. int64_t cur_level = maybe_layer->layerId();
  9088. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  9089. return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
  9090. }
  9091. Tensor input_value;
  9092. optional<int64_t> input_bdim;
  9093. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9094. optional<Tensor> weight_value;
  9095. optional<int64_t> weight_bdim;
  9096. if (weight) {
  9097. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  9098. }
  9099. optional<Tensor> bias_value;
  9100. optional<int64_t> bias_bdim;
  9101. if (bias) {
  9102. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  9103. }
  9104. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps);
  9105. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  9106. }
  9107. template <typename batch_rule_t, batch_rule_t batch_rule>
  9108. ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) {
  9109. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9110. auto maybe_layer = maybeCurrentDynamicLayer();
  9111. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9112. int64_t cur_level = maybe_layer->layerId();
  9113. if (!isBatchedAtLevel(input, cur_level)) {
  9114. return at::_ops::batch_norm_stats::call(input, eps);
  9115. }
  9116. Tensor input_value;
  9117. optional<int64_t> input_bdim;
  9118. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9119. auto results = batch_rule(input_value, input_bdim, eps);
  9120. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  9121. }
  9122. template <typename batch_rule_t, batch_rule_t batch_rule>
  9123. at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
  9124. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9125. auto maybe_layer = maybeCurrentDynamicLayer();
  9126. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9127. int64_t cur_level = maybe_layer->layerId();
  9128. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) {
  9129. return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
  9130. }
  9131. Tensor input_value;
  9132. optional<int64_t> input_bdim;
  9133. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9134. Tensor mean_value;
  9135. optional<int64_t> mean_bdim;
  9136. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  9137. Tensor invstd_value;
  9138. optional<int64_t> invstd_bdim;
  9139. std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  9140. optional<Tensor> weight_value;
  9141. optional<int64_t> weight_bdim;
  9142. if (weight) {
  9143. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  9144. }
  9145. optional<Tensor> bias_value;
  9146. optional<int64_t> bias_bdim;
  9147. if (bias) {
  9148. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  9149. }
  9150. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps);
  9151. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9152. }
  9153. template <typename batch_rule_t, batch_rule_t batch_rule>
  9154. ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
  9155. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9156. auto maybe_layer = maybeCurrentDynamicLayer();
  9157. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9158. int64_t cur_level = maybe_layer->layerId();
  9159. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
  9160. return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
  9161. }
  9162. Tensor input_value;
  9163. optional<int64_t> input_bdim;
  9164. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9165. Tensor mean_value;
  9166. optional<int64_t> mean_bdim;
  9167. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  9168. Tensor invstd_value;
  9169. optional<int64_t> invstd_bdim;
  9170. std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  9171. optional<Tensor> running_mean_value;
  9172. optional<int64_t> running_mean_bdim;
  9173. if (running_mean) {
  9174. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  9175. }
  9176. optional<Tensor> running_var_value;
  9177. optional<int64_t> running_var_bdim;
  9178. if (running_var) {
  9179. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  9180. }
  9181. auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count);
  9182. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  9183. }
  9184. template <typename batch_rule_t, batch_rule_t batch_rule>
  9185. ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
  9186. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9187. auto maybe_layer = maybeCurrentDynamicLayer();
  9188. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9189. int64_t cur_level = maybe_layer->layerId();
  9190. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) {
  9191. return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
  9192. }
  9193. Tensor input_value;
  9194. optional<int64_t> input_bdim;
  9195. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9196. Tensor mean_value;
  9197. optional<int64_t> mean_bdim;
  9198. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  9199. Tensor invstd_value;
  9200. optional<int64_t> invstd_bdim;
  9201. std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  9202. Tensor counts_value;
  9203. optional<int64_t> counts_bdim;
  9204. std::tie(counts_value, counts_bdim) = unwrapTensorAtLevel(counts, cur_level);
  9205. optional<Tensor> running_mean_value;
  9206. optional<int64_t> running_mean_bdim;
  9207. if (running_mean) {
  9208. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  9209. }
  9210. optional<Tensor> running_var_value;
  9211. optional<int64_t> running_var_bdim;
  9212. if (running_var) {
  9213. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  9214. }
  9215. auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim);
  9216. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  9217. }
  9218. template <typename batch_rule_t, batch_rule_t batch_rule>
  9219. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
  9220. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9221. auto maybe_layer = maybeCurrentDynamicLayer();
  9222. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9223. int64_t cur_level = maybe_layer->layerId();
  9224. if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) {
  9225. return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
  9226. }
  9227. Tensor grad_out_value;
  9228. optional<int64_t> grad_out_bdim;
  9229. std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  9230. Tensor input_value;
  9231. optional<int64_t> input_bdim;
  9232. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9233. optional<Tensor> weight_value;
  9234. optional<int64_t> weight_bdim;
  9235. if (weight) {
  9236. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  9237. }
  9238. optional<Tensor> running_mean_value;
  9239. optional<int64_t> running_mean_bdim;
  9240. if (running_mean) {
  9241. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  9242. }
  9243. optional<Tensor> running_var_value;
  9244. optional<int64_t> running_var_bdim;
  9245. if (running_var) {
  9246. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  9247. }
  9248. optional<Tensor> save_mean_value;
  9249. optional<int64_t> save_mean_bdim;
  9250. if (save_mean) {
  9251. std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  9252. }
  9253. optional<Tensor> save_invstd_value;
  9254. optional<int64_t> save_invstd_bdim;
  9255. if (save_invstd) {
  9256. std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level);
  9257. }
  9258. auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask);
  9259. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  9260. }
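// Backward plumbings mirror their forward counterparts; fixed-size flag packs such as the
// ::std::array<bool,3> output_mask are forwarded untouched, exactly like the scalar arguments.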
  9261. template <typename batch_rule_t, batch_rule_t batch_rule>
  9262. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
  9263. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9264. auto maybe_layer = maybeCurrentDynamicLayer();
  9265. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9266. int64_t cur_level = maybe_layer->layerId();
  9267. if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  9268. return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
  9269. }
  9270. Tensor grad_out_value;
  9271. optional<int64_t> grad_out_bdim;
  9272. std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  9273. Tensor input_value;
  9274. optional<int64_t> input_bdim;
  9275. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9276. Tensor mean_value;
  9277. optional<int64_t> mean_bdim;
  9278. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  9279. Tensor invstd_value;
  9280. optional<int64_t> invstd_bdim;
  9281. std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  9282. optional<Tensor> weight_value;
  9283. optional<int64_t> weight_bdim;
  9284. if (weight) {
  9285. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  9286. }
  9287. auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g);
  9288. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
  9289. }
  9290. template <typename batch_rule_t, batch_rule_t batch_rule>
  9291. at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
  9292. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9293. auto maybe_layer = maybeCurrentDynamicLayer();
  9294. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9295. int64_t cur_level = maybe_layer->layerId();
  9296. if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mean_dy, cur_level) && !isBatchedAtLevel(mean_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) {
  9297. return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
  9298. }
  9299. Tensor grad_out_value;
  9300. optional<int64_t> grad_out_bdim;
  9301. std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  9302. Tensor input_value;
  9303. optional<int64_t> input_bdim;
  9304. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9305. Tensor mean_value;
  9306. optional<int64_t> mean_bdim;
  9307. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  9308. Tensor invstd_value;
  9309. optional<int64_t> invstd_bdim;
  9310. std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  9311. Tensor mean_dy_value;
  9312. optional<int64_t> mean_dy_bdim;
  9313. std::tie(mean_dy_value, mean_dy_bdim) = unwrapTensorAtLevel(mean_dy, cur_level);
  9314. Tensor mean_dy_xmu_value;
  9315. optional<int64_t> mean_dy_xmu_bdim;
  9316. std::tie(mean_dy_xmu_value, mean_dy_xmu_bdim) = unwrapTensorAtLevel(mean_dy_xmu, cur_level);
  9317. Tensor count_value;
  9318. optional<int64_t> count_bdim;
  9319. std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
  9320. optional<Tensor> weight_value;
  9321. optional<int64_t> weight_bdim;
  9322. if (weight) {
  9323. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  9324. }
  9325. auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, mean_dy_value, mean_dy_bdim, mean_dy_xmu_value, mean_dy_xmu_bdim, count_value, count_bdim);
  9326. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9327. }
  9328. template <typename batch_rule_t, batch_rule_t batch_rule>
  9329. ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
  9330. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9331. auto maybe_layer = maybeCurrentDynamicLayer();
  9332. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9333. int64_t cur_level = maybe_layer->layerId();
  9334. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
  9335. return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum);
  9336. }
  9337. Tensor input_value;
  9338. optional<int64_t> input_bdim;
  9339. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9340. optional<Tensor> running_mean_value;
  9341. optional<int64_t> running_mean_bdim;
  9342. if (running_mean) {
  9343. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  9344. }
  9345. optional<Tensor> running_var_value;
  9346. optional<int64_t> running_var_bdim;
  9347. if (running_var) {
  9348. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  9349. }
  9350. auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum);
  9351. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  9352. }
  9353. template <typename batch_rule_t, batch_rule_t batch_rule>
  9354. at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {
  9355. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9356. auto maybe_layer = maybeCurrentDynamicLayer();
  9357. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9358. int64_t cur_level = maybe_layer->layerId();
  9359. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  9360. return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
  9361. }
  9362. Tensor input_value;
  9363. optional<int64_t> input_bdim;
  9364. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9365. Tensor weight_value;
  9366. optional<int64_t> weight_bdim;
  9367. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  9368. optional<Tensor> bias_value;
  9369. optional<int64_t> bias_bdim;
  9370. if (bias) {
  9371. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  9372. }
  9373. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride);
  9374. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9375. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ones_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
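// Factory-style ops that only consult `self` (ones_like above) go through the same plumbing;
// the TensorOptions-style arguments (dtype, layout, device, pin_memory, memory_format) are plain
// optionals and are forwarded as-is.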
  9391. template <typename batch_rule_t, batch_rule_t batch_rule>
  9392. at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
  9393. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9394. auto maybe_layer = maybeCurrentDynamicLayer();
  9395. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9396. int64_t cur_level = maybe_layer->layerId();
  9397. if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
  9398. return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
  9399. }
  9400. Tensor x1_value;
  9401. optional<int64_t> x1_bdim;
  9402. std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
  9403. Tensor x2_value;
  9404. optional<int64_t> x2_bdim;
  9405. std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
  9406. auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim);
  9407. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9408. }
  9409. template <typename batch_rule_t, batch_rule_t batch_rule>
  9410. at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
  9411. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9412. auto maybe_layer = maybeCurrentDynamicLayer();
  9413. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9414. int64_t cur_level = maybe_layer->layerId();
  9415. if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
  9416. return at::_ops::cdist::call(x1, x2, p, compute_mode);
  9417. }
  9418. Tensor x1_value;
  9419. optional<int64_t> x1_bdim;
  9420. std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
  9421. Tensor x2_value;
  9422. optional<int64_t> x2_bdim;
  9423. std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
  9424. auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
  9425. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9426. }
  9427. template <typename batch_rule_t, batch_rule_t batch_rule>
  9428. at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) {
  9429. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9430. auto maybe_layer = maybeCurrentDynamicLayer();
  9431. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9432. int64_t cur_level = maybe_layer->layerId();
  9433. if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
  9434. return at::_ops::_euclidean_dist::call(x1, x2);
  9435. }
  9436. Tensor x1_value;
  9437. optional<int64_t> x1_bdim;
  9438. std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
  9439. Tensor x2_value;
  9440. optional<int64_t> x2_bdim;
  9441. std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
  9442. auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim);
  9443. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9444. }
  9445. template <typename batch_rule_t, batch_rule_t batch_rule>
  9446. at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
  9447. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9448. auto maybe_layer = maybeCurrentDynamicLayer();
  9449. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9450. int64_t cur_level = maybe_layer->layerId();
  9451. if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
  9452. return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
  9453. }
  9454. Tensor x1_value;
  9455. optional<int64_t> x1_bdim;
  9456. std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
  9457. Tensor x2_value;
  9458. optional<int64_t> x2_bdim;
  9459. std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
  9460. auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
  9461. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9462. }
  9463. template <typename batch_rule_t, batch_rule_t batch_rule>
  9464. at::Tensor _cdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
  9465. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9466. auto maybe_layer = maybeCurrentDynamicLayer();
  9467. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9468. int64_t cur_level = maybe_layer->layerId();
  9469. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) {
  9470. return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
  9471. }
  9472. Tensor grad_value;
  9473. optional<int64_t> grad_bdim;
  9474. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  9475. Tensor x1_value;
  9476. optional<int64_t> x1_bdim;
  9477. std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
  9478. Tensor x2_value;
  9479. optional<int64_t> x2_bdim;
  9480. std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
  9481. Tensor cdist_value;
  9482. optional<int64_t> cdist_bdim;
  9483. std::tie(cdist_value, cdist_bdim) = unwrapTensorAtLevel(cdist, cur_level);
  9484. auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim);
  9485. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9486. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pdist::call(self, p);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  9502. template <typename batch_rule_t, batch_rule_t batch_rule>
  9503. at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) {
  9504. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9505. auto maybe_layer = maybeCurrentDynamicLayer();
  9506. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9507. int64_t cur_level = maybe_layer->layerId();
  9508. if (!isBatchedAtLevel(self, cur_level)) {
  9509. return at::_ops::_pdist_forward::call(self, p);
  9510. }
  9511. Tensor self_value;
  9512. optional<int64_t> self_bdim;
  9513. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9514. auto results = batch_rule(self_value, self_bdim, p);
  9515. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9516. }
  9517. template <typename batch_rule_t, batch_rule_t batch_rule>
  9518. at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
  9519. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9520. auto maybe_layer = maybeCurrentDynamicLayer();
  9521. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9522. int64_t cur_level = maybe_layer->layerId();
  9523. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) {
  9524. return at::_ops::_pdist_backward::call(grad, self, p, pdist);
  9525. }
  9526. Tensor grad_value;
  9527. optional<int64_t> grad_bdim;
  9528. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  9529. Tensor self_value;
  9530. optional<int64_t> self_bdim;
  9531. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9532. Tensor pdist_value;
  9533. optional<int64_t> pdist_bdim;
  9534. std::tie(pdist_value, pdist_bdim) = unwrapTensorAtLevel(pdist, cur_level);
  9535. auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim);
  9536. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9537. }
  9538. template <typename batch_rule_t, batch_rule_t batch_rule>
  9539. at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
  9540. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9541. auto maybe_layer = maybeCurrentDynamicLayer();
  9542. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9543. int64_t cur_level = maybe_layer->layerId();
  9544. if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
  9545. return at::_ops::cosine_similarity::call(x1, x2, dim, eps);
  9546. }
  9547. Tensor x1_value;
  9548. optional<int64_t> x1_bdim;
  9549. std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
  9550. Tensor x2_value;
  9551. optional<int64_t> x2_bdim;
  9552. std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
  9553. auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps);
  9554. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9555. }
  9556. template <typename batch_rule_t, batch_rule_t batch_rule>
  9557. at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
  9558. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9559. auto maybe_layer = maybeCurrentDynamicLayer();
  9560. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9561. int64_t cur_level = maybe_layer->layerId();
  9562. if (!isBatchedAtLevel(self, cur_level)) {
  9563. return at::_ops::permute::call(self, dims);
  9564. }
  9565. Tensor self_value;
  9566. optional<int64_t> self_bdim;
  9567. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9568. auto results = batch_rule(self_value, self_bdim, dims);
  9569. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9570. }
  9571. template <typename batch_rule_t, batch_rule_t batch_rule>
  9572. at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
  9573. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9574. auto maybe_layer = maybeCurrentDynamicLayer();
  9575. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9576. int64_t cur_level = maybe_layer->layerId();
  9577. if (!isBatchedAtLevel(self, cur_level)) {
  9578. return at::_ops::movedim_intlist::call(self, source, destination);
  9579. }
  9580. Tensor self_value;
  9581. optional<int64_t> self_bdim;
  9582. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9583. auto results = batch_rule(self_value, self_bdim, source, destination);
  9584. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9585. }
  9586. template <typename batch_rule_t, batch_rule_t batch_rule>
  9587. at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
  9588. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9589. auto maybe_layer = maybeCurrentDynamicLayer();
  9590. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9591. int64_t cur_level = maybe_layer->layerId();
  9592. if (!isBatchedAtLevel(self, cur_level)) {
  9593. return at::_ops::movedim_int::call(self, source, destination);
  9594. }
  9595. Tensor self_value;
  9596. optional<int64_t> self_bdim;
  9597. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9598. auto results = batch_rule(self_value, self_bdim, source, destination);
  9599. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9600. }
  9601. template <typename batch_rule_t, batch_rule_t batch_rule>
  9602. at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
  9603. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9604. auto maybe_layer = maybeCurrentDynamicLayer();
  9605. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9606. int64_t cur_level = maybe_layer->layerId();
  9607. if (!isBatchedAtLevel(self, cur_level)) {
  9608. return at::_ops::moveaxis_intlist::call(self, source, destination);
  9609. }
  9610. Tensor self_value;
  9611. optional<int64_t> self_bdim;
  9612. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9613. auto results = batch_rule(self_value, self_bdim, source, destination);
  9614. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9615. }
  9616. template <typename batch_rule_t, batch_rule_t batch_rule>
  9617. at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
  9618. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9619. auto maybe_layer = maybeCurrentDynamicLayer();
  9620. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9621. int64_t cur_level = maybe_layer->layerId();
  9622. if (!isBatchedAtLevel(self, cur_level)) {
  9623. return at::_ops::moveaxis_int::call(self, source, destination);
  9624. }
  9625. Tensor self_value;
  9626. optional<int64_t> self_bdim;
  9627. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9628. auto results = batch_rule(self_value, self_bdim, source, destination);
  9629. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9630. }
  9631. template <typename batch_rule_t, batch_rule_t batch_rule>
  9632. at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) {
  9633. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9634. auto maybe_layer = maybeCurrentDynamicLayer();
  9635. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9636. int64_t cur_level = maybe_layer->layerId();
  9637. if (!isBatchedAtLevel(self, cur_level)) {
  9638. return at::_ops::numpy_T::call(self);
  9639. }
  9640. Tensor self_value;
  9641. optional<int64_t> self_bdim;
  9642. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9643. auto results = batch_rule(self_value, self_bdim);
  9644. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9645. }
  9646. template <typename batch_rule_t, batch_rule_t batch_rule>
  9647. at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) {
  9648. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9649. auto maybe_layer = maybeCurrentDynamicLayer();
  9650. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9651. int64_t cur_level = maybe_layer->layerId();
  9652. if (!isBatchedAtLevel(self, cur_level)) {
  9653. return at::_ops::matrix_H::call(self);
  9654. }
  9655. Tensor self_value;
  9656. optional<int64_t> self_bdim;
  9657. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9658. auto results = batch_rule(self_value, self_bdim);
  9659. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9660. }
  9661. template <typename batch_rule_t, batch_rule_t batch_rule>
  9662. at::Tensor mT_generated_plumbing(const at::Tensor & self) {
  9663. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9664. auto maybe_layer = maybeCurrentDynamicLayer();
  9665. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9666. int64_t cur_level = maybe_layer->layerId();
  9667. if (!isBatchedAtLevel(self, cur_level)) {
  9668. return at::_ops::mT::call(self);
  9669. }
  9670. Tensor self_value;
  9671. optional<int64_t> self_bdim;
  9672. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9673. auto results = batch_rule(self_value, self_bdim);
  9674. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9675. }
  9676. template <typename batch_rule_t, batch_rule_t batch_rule>
  9677. at::Tensor mH_generated_plumbing(const at::Tensor & self) {
  9678. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9679. auto maybe_layer = maybeCurrentDynamicLayer();
  9680. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9681. int64_t cur_level = maybe_layer->layerId();
  9682. if (!isBatchedAtLevel(self, cur_level)) {
  9683. return at::_ops::mH::call(self);
  9684. }
  9685. Tensor self_value;
  9686. optional<int64_t> self_bdim;
  9687. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9688. auto results = batch_rule(self_value, self_bdim);
  9689. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9690. }
  9691. template <typename batch_rule_t, batch_rule_t batch_rule>
  9692. at::Tensor adjoint_generated_plumbing(const at::Tensor & self) {
  9693. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9694. auto maybe_layer = maybeCurrentDynamicLayer();
  9695. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9696. int64_t cur_level = maybe_layer->layerId();
  9697. if (!isBatchedAtLevel(self, cur_level)) {
  9698. return at::_ops::adjoint::call(self);
  9699. }
  9700. Tensor self_value;
  9701. optional<int64_t> self_bdim;
  9702. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9703. auto results = batch_rule(self_value, self_bdim);
  9704. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9705. }
  9706. template <typename batch_rule_t, batch_rule_t batch_rule>
  9707. at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) {
  9708. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9709. auto maybe_layer = maybeCurrentDynamicLayer();
  9710. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9711. int64_t cur_level = maybe_layer->layerId();
  9712. if (!isBatchedAtLevel(self, cur_level)) {
  9713. return at::_ops::pixel_shuffle::call(self, upscale_factor);
  9714. }
  9715. Tensor self_value;
  9716. optional<int64_t> self_bdim;
  9717. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9718. auto results = batch_rule(self_value, self_bdim, upscale_factor);
  9719. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9720. }
  9721. template <typename batch_rule_t, batch_rule_t batch_rule>
  9722. at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) {
  9723. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9724. auto maybe_layer = maybeCurrentDynamicLayer();
  9725. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9726. int64_t cur_level = maybe_layer->layerId();
  9727. if (!isBatchedAtLevel(self, cur_level)) {
  9728. return at::_ops::pixel_unshuffle::call(self, downscale_factor);
  9729. }
  9730. Tensor self_value;
  9731. optional<int64_t> self_bdim;
  9732. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9733. auto results = batch_rule(self_value, self_bdim, downscale_factor);
  9734. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9735. }
  9736. template <typename batch_rule_t, batch_rule_t batch_rule>
  9737. at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, int64_t groups) {
  9738. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9739. auto maybe_layer = maybeCurrentDynamicLayer();
  9740. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9741. int64_t cur_level = maybe_layer->layerId();
  9742. if (!isBatchedAtLevel(self, cur_level)) {
  9743. return at::_ops::channel_shuffle::call(self, groups);
  9744. }
  9745. Tensor self_value;
  9746. optional<int64_t> self_bdim;
  9747. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9748. auto results = batch_rule(self_value, self_bdim, groups);
  9749. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9750. }
  9751. template <typename batch_rule_t, batch_rule_t batch_rule>
  9752. at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, int64_t groups) {
  9753. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9754. auto maybe_layer = maybeCurrentDynamicLayer();
  9755. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9756. int64_t cur_level = maybe_layer->layerId();
  9757. if (!isBatchedAtLevel(self, cur_level)) {
  9758. return at::_ops::native_channel_shuffle::call(self, groups);
  9759. }
  9760. Tensor self_value;
  9761. optional<int64_t> self_bdim;
  9762. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9763. auto results = batch_rule(self_value, self_bdim, groups);
  9764. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9765. }
  9766. template <typename batch_rule_t, batch_rule_t batch_rule>
  9767. at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
  9768. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9769. auto maybe_layer = maybeCurrentDynamicLayer();
  9770. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9771. int64_t cur_level = maybe_layer->layerId();
  9772. if (!isBatchedAtLevel(self, cur_level)) {
  9773. return at::_ops::pin_memory::call(self, device);
  9774. }
  9775. Tensor self_value;
  9776. optional<int64_t> self_bdim;
  9777. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9778. auto results = batch_rule(self_value, self_bdim, device);
  9779. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9780. }
  9781. template <typename batch_rule_t, batch_rule_t batch_rule>
  9782. at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
  9783. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9784. auto maybe_layer = maybeCurrentDynamicLayer();
  9785. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9786. int64_t cur_level = maybe_layer->layerId();
  9787. if (!isBatchedAtLevel(self, cur_level)) {
  9788. return at::_ops::_pin_memory::call(self, device);
  9789. }
  9790. Tensor self_value;
  9791. optional<int64_t> self_bdim;
  9792. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9793. auto results = batch_rule(self_value, self_bdim, device);
  9794. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9795. }
  9796. template <typename batch_rule_t, batch_rule_t batch_rule>
  9797. at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) {
  9798. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9799. auto maybe_layer = maybeCurrentDynamicLayer();
  9800. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9801. int64_t cur_level = maybe_layer->layerId();
  9802. if (!isBatchedAtLevel(self, cur_level)) {
  9803. return at::_ops::pinverse::call(self, rcond);
  9804. }
  9805. Tensor self_value;
  9806. optional<int64_t> self_bdim;
  9807. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9808. auto results = batch_rule(self_value, self_bdim, rcond);
  9809. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9810. }
  9811. template <typename batch_rule_t, batch_rule_t batch_rule>
  9812. at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
  9813. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9814. auto maybe_layer = maybeCurrentDynamicLayer();
  9815. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9816. int64_t cur_level = maybe_layer->layerId();
  9817. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) {
  9818. return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction);
  9819. }
  9820. Tensor input_value;
  9821. optional<int64_t> input_bdim;
  9822. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  9823. Tensor target_value;
  9824. optional<int64_t> target_bdim;
  9825. std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  9826. auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction);
  9827. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9828. }
  9829. template <typename batch_rule_t, batch_rule_t batch_rule>
  9830. at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) {
  9831. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  9832. auto maybe_layer = maybeCurrentDynamicLayer();
  9833. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  9834. int64_t cur_level = maybe_layer->layerId();
  9835. if (!isBatchedAtLevel(self, cur_level)) {
  9836. return at::_ops::rad2deg::call(self);
  9837. }
  9838. Tensor self_value;
  9839. optional<int64_t> self_bdim;
  9840. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  9841. auto results = batch_rule(self_value, self_bdim);
  9842. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  9843. }
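// In-place variants (tagged "gen_vmap_inplace_plumbing") let the batch rule mutate the
// unwrapped value directly and then return `self`, so no makeBatched re-wrapping is needed.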
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rad2deg_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::deg2rad::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::deg2rad_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
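// For the *_like random factories below, only `self` is unwrapped; the TensorOptions
// arguments (dtype, layout, device, pin_memory, memory_format) pass through to the batch
// rule unchanged.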
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rand_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor randint_like_generated_plumbing(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor randn_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ravel_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ravel::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reciprocal::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reciprocal_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor neg_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::neg::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & neg__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::neg_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor negative_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::negative::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & negative__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::negative_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::repeat::call(self, repeats);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, repeats);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, c10::optional<int64_t> output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(repeats, cur_level)) {
    return at::_ops::repeat_interleave_Tensor::call(repeats, output_size);
  }
  Tensor repeats_value;
  optional<int64_t> repeats_bdim;
  std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
  auto results = batch_rule(repeats_value, repeats_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) {
    return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor repeats_value;
  optional<int64_t> repeats_bdim;
  std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
  auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reshape::call(self, shape);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_reshape_copy::call(self, size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_reshape_alias::call(self, size, stride);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_mkldnn_reshape::call(self, shape);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::reshape_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor round_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & round__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round_decimals::call(self, decimals);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, decimals);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round__decimals::call(self, decimals);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, decimals);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rrelu::call(self, lower, upper, training, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rrelu_::call(self, lower, upper, training, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, lower, upper, training, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor relu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & relu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor relu6_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu6::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & relu6__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu6_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::prelu::call(self, weight);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_prelu_kernel::call(self, weight);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
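// Ops with multiple outputs (such as _prelu_kernel_backward below) get a tuple of
// (value, batch-dim) pairs back from the batch rule; each output is re-wrapped with
// makeBatched individually before being packed into the returned std::tuple.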
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu_::call(self, approximate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, approximate);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu::call(self, approximate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, approximate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu_backward::call(grad_output, self, approximate);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardshrink::call(self, lambd);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardshrink_backward::call(grad_out, self, lambd);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsqrt::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsqrt_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::select_Dimname::call(self, dim, index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10545. }
  10546. template <typename batch_rule_t, batch_rule_t batch_rule>
  10547. at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  10548. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10549. auto maybe_layer = maybeCurrentDynamicLayer();
  10550. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10551. int64_t cur_level = maybe_layer->layerId();
  10552. if (!isBatchedAtLevel(self, cur_level)) {
  10553. return at::_ops::select_int::call(self, dim, index);
  10554. }
  10555. Tensor self_value;
  10556. optional<int64_t> self_bdim;
  10557. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10558. auto results = batch_rule(self_value, self_bdim, dim, index);
  10559. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10560. }
  10561. template <typename batch_rule_t, batch_rule_t batch_rule>
  10562. at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
  10563. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10564. auto maybe_layer = maybeCurrentDynamicLayer();
  10565. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10566. int64_t cur_level = maybe_layer->layerId();
  10567. if (!isBatchedAtLevel(grad_output, cur_level)) {
  10568. return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
  10569. }
  10570. Tensor grad_output_value;
  10571. optional<int64_t> grad_output_bdim;
  10572. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  10573. auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index);
  10574. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10575. }
  10576. template <typename batch_rule_t, batch_rule_t batch_rule>
  10577. at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
  10578. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10579. auto maybe_layer = maybeCurrentDynamicLayer();
  10580. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10581. int64_t cur_level = maybe_layer->layerId();
  10582. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  10583. return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
  10584. }
  10585. Tensor grad_output_value;
  10586. optional<int64_t> grad_output_bdim;
  10587. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  10588. Tensor self_value;
  10589. optional<int64_t> self_bdim;
  10590. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10591. auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index);
  10592. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10593. }
  10594. template <typename batch_rule_t, batch_rule_t batch_rule>
  10595. at::Tensor selu_generated_plumbing(const at::Tensor & self) {
  10596. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10597. auto maybe_layer = maybeCurrentDynamicLayer();
  10598. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10599. int64_t cur_level = maybe_layer->layerId();
  10600. if (!isBatchedAtLevel(self, cur_level)) {
  10601. return at::_ops::selu::call(self);
  10602. }
  10603. Tensor self_value;
  10604. optional<int64_t> self_bdim;
  10605. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10606. auto results = batch_rule(self_value, self_bdim);
  10607. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10608. }
  10609. template <typename batch_rule_t, batch_rule_t batch_rule>
  10610. at::Tensor & selu__generated_plumbing(at::Tensor & self) {
  10611. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10612. auto maybe_layer = maybeCurrentDynamicLayer();
  10613. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10614. int64_t cur_level = maybe_layer->layerId();
  10615. if (!isBatchedAtLevel(self, cur_level)) {
  10616. return at::_ops::selu_::call(self);
  10617. }
  10618. Tensor self_value;
  10619. optional<int64_t> self_bdim;
  10620. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10621. batch_rule(self_value, self_bdim);
  10622. return self;
  10623. }
  10624. template <typename batch_rule_t, batch_rule_t batch_rule>
  10625. at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) {
  10626. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10627. auto maybe_layer = maybeCurrentDynamicLayer();
  10628. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10629. int64_t cur_level = maybe_layer->layerId();
  10630. if (!isBatchedAtLevel(self, cur_level)) {
  10631. return at::_ops::celu::call(self, alpha);
  10632. }
  10633. Tensor self_value;
  10634. optional<int64_t> self_bdim;
  10635. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10636. auto results = batch_rule(self_value, self_bdim, alpha);
  10637. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10638. }
  10639. template <typename batch_rule_t, batch_rule_t batch_rule>
  10640. at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) {
  10641. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10642. auto maybe_layer = maybeCurrentDynamicLayer();
  10643. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10644. int64_t cur_level = maybe_layer->layerId();
  10645. if (!isBatchedAtLevel(self, cur_level)) {
  10646. return at::_ops::celu_::call(self, alpha);
  10647. }
  10648. Tensor self_value;
  10649. optional<int64_t> self_bdim;
  10650. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10651. batch_rule(self_value, self_bdim, alpha);
  10652. return self;
  10653. }
  10654. template <typename batch_rule_t, batch_rule_t batch_rule>
  10655. at::Tensor silu_generated_plumbing(const at::Tensor & self) {
  10656. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10657. auto maybe_layer = maybeCurrentDynamicLayer();
  10658. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10659. int64_t cur_level = maybe_layer->layerId();
  10660. if (!isBatchedAtLevel(self, cur_level)) {
  10661. return at::_ops::silu::call(self);
  10662. }
  10663. Tensor self_value;
  10664. optional<int64_t> self_bdim;
  10665. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10666. auto results = batch_rule(self_value, self_bdim);
  10667. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10668. }
  10669. template <typename batch_rule_t, batch_rule_t batch_rule>
  10670. at::Tensor & silu__generated_plumbing(at::Tensor & self) {
  10671. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10672. auto maybe_layer = maybeCurrentDynamicLayer();
  10673. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10674. int64_t cur_level = maybe_layer->layerId();
  10675. if (!isBatchedAtLevel(self, cur_level)) {
  10676. return at::_ops::silu_::call(self);
  10677. }
  10678. Tensor self_value;
  10679. optional<int64_t> self_bdim;
  10680. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10681. batch_rule(self_value, self_bdim);
  10682. return self;
  10683. }
  10684. template <typename batch_rule_t, batch_rule_t batch_rule>
  10685. at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  10686. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10687. auto maybe_layer = maybeCurrentDynamicLayer();
  10688. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10689. int64_t cur_level = maybe_layer->layerId();
  10690. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  10691. return at::_ops::silu_backward::call(grad_output, self);
  10692. }
  10693. Tensor grad_output_value;
  10694. optional<int64_t> grad_output_bdim;
  10695. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  10696. Tensor self_value;
  10697. optional<int64_t> self_bdim;
  10698. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10699. auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  10700. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10701. }
  10702. template <typename batch_rule_t, batch_rule_t batch_rule>
  10703. at::Tensor mish_generated_plumbing(const at::Tensor & self) {
  10704. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10705. auto maybe_layer = maybeCurrentDynamicLayer();
  10706. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10707. int64_t cur_level = maybe_layer->layerId();
  10708. if (!isBatchedAtLevel(self, cur_level)) {
  10709. return at::_ops::mish::call(self);
  10710. }
  10711. Tensor self_value;
  10712. optional<int64_t> self_bdim;
  10713. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10714. auto results = batch_rule(self_value, self_bdim);
  10715. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10716. }
  10717. template <typename batch_rule_t, batch_rule_t batch_rule>
  10718. at::Tensor & mish__generated_plumbing(at::Tensor & self) {
  10719. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10720. auto maybe_layer = maybeCurrentDynamicLayer();
  10721. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10722. int64_t cur_level = maybe_layer->layerId();
  10723. if (!isBatchedAtLevel(self, cur_level)) {
  10724. return at::_ops::mish_::call(self);
  10725. }
  10726. Tensor self_value;
  10727. optional<int64_t> self_bdim;
  10728. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10729. batch_rule(self_value, self_bdim);
  10730. return self;
  10731. }
  10732. template <typename batch_rule_t, batch_rule_t batch_rule>
  10733. at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  10734. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10735. auto maybe_layer = maybeCurrentDynamicLayer();
  10736. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10737. int64_t cur_level = maybe_layer->layerId();
  10738. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  10739. return at::_ops::mish_backward::call(grad_output, self);
  10740. }
  10741. Tensor grad_output_value;
  10742. optional<int64_t> grad_output_bdim;
  10743. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  10744. Tensor self_value;
  10745. optional<int64_t> self_bdim;
  10746. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10747. auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  10748. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10749. }
  10750. template <typename batch_rule_t, batch_rule_t batch_rule>
  10751. at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) {
  10752. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10753. auto maybe_layer = maybeCurrentDynamicLayer();
  10754. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10755. int64_t cur_level = maybe_layer->layerId();
  10756. if (!isBatchedAtLevel(self, cur_level)) {
  10757. return at::_ops::sigmoid::call(self);
  10758. }
  10759. Tensor self_value;
  10760. optional<int64_t> self_bdim;
  10761. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10762. auto results = batch_rule(self_value, self_bdim);
  10763. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10764. }
  10765. template <typename batch_rule_t, batch_rule_t batch_rule>
  10766. at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) {
  10767. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10768. auto maybe_layer = maybeCurrentDynamicLayer();
  10769. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10770. int64_t cur_level = maybe_layer->layerId();
  10771. if (!isBatchedAtLevel(self, cur_level)) {
  10772. return at::_ops::sigmoid_::call(self);
  10773. }
  10774. Tensor self_value;
  10775. optional<int64_t> self_bdim;
  10776. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10777. batch_rule(self_value, self_bdim);
  10778. return self;
  10779. }
  10780. template <typename batch_rule_t, batch_rule_t batch_rule>
  10781. at::Tensor logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
  10782. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10783. auto maybe_layer = maybeCurrentDynamicLayer();
  10784. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10785. int64_t cur_level = maybe_layer->layerId();
  10786. if (!isBatchedAtLevel(self, cur_level)) {
  10787. return at::_ops::logit::call(self, eps);
  10788. }
  10789. Tensor self_value;
  10790. optional<int64_t> self_bdim;
  10791. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10792. auto results = batch_rule(self_value, self_bdim, eps);
  10793. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10794. }
  10795. template <typename batch_rule_t, batch_rule_t batch_rule>
  10796. at::Tensor & logit__generated_plumbing(at::Tensor & self, c10::optional<double> eps) {
  10797. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10798. auto maybe_layer = maybeCurrentDynamicLayer();
  10799. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10800. int64_t cur_level = maybe_layer->layerId();
  10801. if (!isBatchedAtLevel(self, cur_level)) {
  10802. return at::_ops::logit_::call(self, eps);
  10803. }
  10804. Tensor self_value;
  10805. optional<int64_t> self_bdim;
  10806. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10807. batch_rule(self_value, self_bdim, eps);
  10808. return self;
  10809. }
  10810. template <typename batch_rule_t, batch_rule_t batch_rule>
  10811. at::Tensor sin_generated_plumbing(const at::Tensor & self) {
  10812. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10813. auto maybe_layer = maybeCurrentDynamicLayer();
  10814. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10815. int64_t cur_level = maybe_layer->layerId();
  10816. if (!isBatchedAtLevel(self, cur_level)) {
  10817. return at::_ops::sin::call(self);
  10818. }
  10819. Tensor self_value;
  10820. optional<int64_t> self_bdim;
  10821. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10822. auto results = batch_rule(self_value, self_bdim);
  10823. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10824. }
  10825. template <typename batch_rule_t, batch_rule_t batch_rule>
  10826. at::Tensor & sin__generated_plumbing(at::Tensor & self) {
  10827. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10828. auto maybe_layer = maybeCurrentDynamicLayer();
  10829. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10830. int64_t cur_level = maybe_layer->layerId();
  10831. if (!isBatchedAtLevel(self, cur_level)) {
  10832. return at::_ops::sin_::call(self);
  10833. }
  10834. Tensor self_value;
  10835. optional<int64_t> self_bdim;
  10836. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10837. batch_rule(self_value, self_bdim);
  10838. return self;
  10839. }
  10840. template <typename batch_rule_t, batch_rule_t batch_rule>
  10841. at::Tensor sinc_generated_plumbing(const at::Tensor & self) {
  10842. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10843. auto maybe_layer = maybeCurrentDynamicLayer();
  10844. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10845. int64_t cur_level = maybe_layer->layerId();
  10846. if (!isBatchedAtLevel(self, cur_level)) {
  10847. return at::_ops::sinc::call(self);
  10848. }
  10849. Tensor self_value;
  10850. optional<int64_t> self_bdim;
  10851. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10852. auto results = batch_rule(self_value, self_bdim);
  10853. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10854. }
  10855. template <typename batch_rule_t, batch_rule_t batch_rule>
  10856. at::Tensor & sinc__generated_plumbing(at::Tensor & self) {
  10857. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10858. auto maybe_layer = maybeCurrentDynamicLayer();
  10859. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10860. int64_t cur_level = maybe_layer->layerId();
  10861. if (!isBatchedAtLevel(self, cur_level)) {
  10862. return at::_ops::sinc_::call(self);
  10863. }
  10864. Tensor self_value;
  10865. optional<int64_t> self_bdim;
  10866. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10867. batch_rule(self_value, self_bdim);
  10868. return self;
  10869. }
  10870. template <typename batch_rule_t, batch_rule_t batch_rule>
  10871. at::Tensor sinh_generated_plumbing(const at::Tensor & self) {
  10872. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10873. auto maybe_layer = maybeCurrentDynamicLayer();
  10874. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10875. int64_t cur_level = maybe_layer->layerId();
  10876. if (!isBatchedAtLevel(self, cur_level)) {
  10877. return at::_ops::sinh::call(self);
  10878. }
  10879. Tensor self_value;
  10880. optional<int64_t> self_bdim;
  10881. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10882. auto results = batch_rule(self_value, self_bdim);
  10883. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10884. }
  10885. template <typename batch_rule_t, batch_rule_t batch_rule>
  10886. at::Tensor & sinh__generated_plumbing(at::Tensor & self) {
  10887. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10888. auto maybe_layer = maybeCurrentDynamicLayer();
  10889. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  10890. int64_t cur_level = maybe_layer->layerId();
  10891. if (!isBatchedAtLevel(self, cur_level)) {
  10892. return at::_ops::sinh_::call(self);
  10893. }
  10894. Tensor self_value;
  10895. optional<int64_t> self_bdim;
  10896. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10897. batch_rule(self_value, self_bdim);
  10898. return self;
  10899. }
  10900. template <typename batch_rule_t, batch_rule_t batch_rule>
  10901. at::Tensor detach_generated_plumbing(const at::Tensor & self) {
  10902. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10903. auto maybe_layer = maybeCurrentDynamicLayer();
  10904. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10905. int64_t cur_level = maybe_layer->layerId();
  10906. if (!isBatchedAtLevel(self, cur_level)) {
  10907. return at::_ops::detach::call(self);
  10908. }
  10909. Tensor self_value;
  10910. optional<int64_t> self_bdim;
  10911. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10912. auto results = batch_rule(self_value, self_bdim);
  10913. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10914. }
  10915. template <typename batch_rule_t, batch_rule_t batch_rule>
  10916. at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  10917. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10918. auto maybe_layer = maybeCurrentDynamicLayer();
  10919. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10920. int64_t cur_level = maybe_layer->layerId();
  10921. if (!isBatchedAtLevel(self, cur_level)) {
  10922. return at::_ops::slice_Tensor::call(self, dim, start, end, step);
  10923. }
  10924. Tensor self_value;
  10925. optional<int64_t> self_bdim;
  10926. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10927. auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
  10928. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10929. }
  10930. template <typename batch_rule_t, batch_rule_t batch_rule>
  10931. at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
  10932. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10933. auto maybe_layer = maybeCurrentDynamicLayer();
  10934. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10935. int64_t cur_level = maybe_layer->layerId();
  10936. if (!isBatchedAtLevel(grad_output, cur_level)) {
  10937. return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
  10938. }
  10939. Tensor grad_output_value;
  10940. optional<int64_t> grad_output_bdim;
  10941. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  10942. auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step);
  10943. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10944. }
  10945. template <typename batch_rule_t, batch_rule_t batch_rule>
  10946. at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  10947. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10948. auto maybe_layer = maybeCurrentDynamicLayer();
  10949. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10950. int64_t cur_level = maybe_layer->layerId();
  10951. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
  10952. return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
  10953. }
  10954. Tensor self_value;
  10955. optional<int64_t> self_bdim;
  10956. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10957. Tensor src_value;
  10958. optional<int64_t> src_bdim;
  10959. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  10960. auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
  10961. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10962. }
  10963. template <typename batch_rule_t, batch_rule_t batch_rule>
  10964. at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
  10965. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10966. auto maybe_layer = maybeCurrentDynamicLayer();
  10967. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10968. int64_t cur_level = maybe_layer->layerId();
  10969. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
  10970. return at::_ops::select_scatter::call(self, src, dim, index);
  10971. }
  10972. Tensor self_value;
  10973. optional<int64_t> self_bdim;
  10974. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10975. Tensor src_value;
  10976. optional<int64_t> src_bdim;
  10977. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  10978. auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index);
  10979. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10980. }
  10981. template <typename batch_rule_t, batch_rule_t batch_rule>
  10982. at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
  10983. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  10984. auto maybe_layer = maybeCurrentDynamicLayer();
  10985. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  10986. int64_t cur_level = maybe_layer->layerId();
  10987. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
  10988. return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2);
  10989. }
  10990. Tensor self_value;
  10991. optional<int64_t> self_bdim;
  10992. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  10993. Tensor src_value;
  10994. optional<int64_t> src_bdim;
  10995. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  10996. auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2);
  10997. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  10998. }
  10999. template <typename batch_rule_t, batch_rule_t batch_rule>
  11000. at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  11001. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11002. auto maybe_layer = maybeCurrentDynamicLayer();
  11003. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11004. int64_t cur_level = maybe_layer->layerId();
  11005. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
  11006. return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
  11007. }
  11008. Tensor self_value;
  11009. optional<int64_t> self_bdim;
  11010. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11011. Tensor src_value;
  11012. optional<int64_t> src_bdim;
  11013. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  11014. auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset);
  11015. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11016. }
  11017. template <typename batch_rule_t, batch_rule_t batch_rule>
  11018. at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
  11019. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11020. auto maybe_layer = maybeCurrentDynamicLayer();
  11021. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11022. int64_t cur_level = maybe_layer->layerId();
  11023. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
  11024. return at::_ops::smm::call(self, mat2);
  11025. }
  11026. Tensor self_value;
  11027. optional<int64_t> self_bdim;
  11028. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11029. Tensor mat2_value;
  11030. optional<int64_t> mat2_bdim;
  11031. std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  11032. auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
  11033. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11034. }
  11035. template <typename batch_rule_t, batch_rule_t batch_rule>
  11036. at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  11037. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11038. auto maybe_layer = maybeCurrentDynamicLayer();
  11039. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11040. int64_t cur_level = maybe_layer->layerId();
  11041. if (!isBatchedAtLevel(self, cur_level)) {
  11042. return at::_ops::softmax_int::call(self, dim, dtype);
  11043. }
  11044. Tensor self_value;
  11045. optional<int64_t> self_bdim;
  11046. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11047. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  11048. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11049. }
  11050. template <typename batch_rule_t, batch_rule_t batch_rule>
  11051. at::Tensor softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  11052. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11053. auto maybe_layer = maybeCurrentDynamicLayer();
  11054. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11055. int64_t cur_level = maybe_layer->layerId();
  11056. if (!isBatchedAtLevel(self, cur_level)) {
  11057. return at::_ops::softmax_Dimname::call(self, dim, dtype);
  11058. }
  11059. Tensor self_value;
  11060. optional<int64_t> self_bdim;
  11061. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11062. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  11063. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11064. }
  11065. template <typename batch_rule_t, batch_rule_t batch_rule>
  11066. at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
  11067. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11068. auto maybe_layer = maybeCurrentDynamicLayer();
  11069. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11070. int64_t cur_level = maybe_layer->layerId();
  11071. if (!isBatchedAtLevel(self, cur_level)) {
  11072. return at::_ops::_softmax::call(self, dim, half_to_float);
  11073. }
  11074. Tensor self_value;
  11075. optional<int64_t> self_bdim;
  11076. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11077. auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
  11078. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11079. }
  11080. template <typename batch_rule_t, batch_rule_t batch_rule>
  11081. at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
  11082. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11083. auto maybe_layer = maybeCurrentDynamicLayer();
  11084. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11085. int64_t cur_level = maybe_layer->layerId();
  11086. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
  11087. return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
  11088. }
  11089. Tensor grad_output_value;
  11090. optional<int64_t> grad_output_bdim;
  11091. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  11092. Tensor output_value;
  11093. optional<int64_t> output_bdim;
  11094. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  11095. auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
  11096. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11097. }
  11098. template <typename batch_rule_t, batch_rule_t batch_rule>
  11099. ::std::vector<at::Tensor> unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  11100. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11101. auto maybe_layer = maybeCurrentDynamicLayer();
  11102. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11103. int64_t cur_level = maybe_layer->layerId();
  11104. if (!isBatchedAtLevel(self, cur_level)) {
  11105. return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
  11106. }
  11107. Tensor self_value;
  11108. optional<int64_t> self_bdim;
  11109. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11110. auto results = batch_rule(self_value, self_bdim, split_size, dim);
  11111. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11112. }
  11113. template <typename batch_rule_t, batch_rule_t batch_rule>
  11114. ::std::vector<at::Tensor> split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  11115. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11116. auto maybe_layer = maybeCurrentDynamicLayer();
  11117. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11118. int64_t cur_level = maybe_layer->layerId();
  11119. if (!isBatchedAtLevel(self, cur_level)) {
  11120. return at::_ops::split_Tensor::call(self, split_size, dim);
  11121. }
  11122. Tensor self_value;
  11123. optional<int64_t> self_bdim;
  11124. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11125. auto results = batch_rule(self_value, self_bdim, split_size, dim);
  11126. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11127. }
  11128. template <typename batch_rule_t, batch_rule_t batch_rule>
  11129. ::std::vector<at::Tensor> split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
  11130. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11131. auto maybe_layer = maybeCurrentDynamicLayer();
  11132. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11133. int64_t cur_level = maybe_layer->layerId();
  11134. if (!isBatchedAtLevel(self, cur_level)) {
  11135. return at::_ops::split_sizes::call(self, split_size, dim);
  11136. }
  11137. Tensor self_value;
  11138. optional<int64_t> self_bdim;
  11139. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11140. auto results = batch_rule(self_value, self_bdim, split_size, dim);
  11141. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11142. }
  11143. template <typename batch_rule_t, batch_rule_t batch_rule>
  11144. ::std::vector<at::Tensor> unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  11145. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11146. auto maybe_layer = maybeCurrentDynamicLayer();
  11147. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11148. int64_t cur_level = maybe_layer->layerId();
  11149. if (!isBatchedAtLevel(self, cur_level)) {
  11150. return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
  11151. }
  11152. Tensor self_value;
  11153. optional<int64_t> self_bdim;
  11154. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11155. auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  11156. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11157. }
  11158. template <typename batch_rule_t, batch_rule_t batch_rule>
  11159. ::std::vector<at::Tensor> split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  11160. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11161. auto maybe_layer = maybeCurrentDynamicLayer();
  11162. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11163. int64_t cur_level = maybe_layer->layerId();
  11164. if (!isBatchedAtLevel(self, cur_level)) {
  11165. return at::_ops::split_with_sizes::call(self, split_sizes, dim);
  11166. }
  11167. Tensor self_value;
  11168. optional<int64_t> self_bdim;
  11169. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11170. auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  11171. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11172. }
  11173. template <typename batch_rule_t, batch_rule_t batch_rule>
  11174. ::std::vector<at::Tensor> hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  11175. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11176. auto maybe_layer = maybeCurrentDynamicLayer();
  11177. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11178. int64_t cur_level = maybe_layer->layerId();
  11179. if (!isBatchedAtLevel(self, cur_level)) {
  11180. return at::_ops::hsplit_int::call(self, sections);
  11181. }
  11182. Tensor self_value;
  11183. optional<int64_t> self_bdim;
  11184. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11185. auto results = batch_rule(self_value, self_bdim, sections);
  11186. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11187. }
  11188. template <typename batch_rule_t, batch_rule_t batch_rule>
  11189. ::std::vector<at::Tensor> hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  11190. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11191. auto maybe_layer = maybeCurrentDynamicLayer();
  11192. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11193. int64_t cur_level = maybe_layer->layerId();
  11194. if (!isBatchedAtLevel(self, cur_level)) {
  11195. return at::_ops::hsplit_array::call(self, indices);
  11196. }
  11197. Tensor self_value;
  11198. optional<int64_t> self_bdim;
  11199. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11200. auto results = batch_rule(self_value, self_bdim, indices);
  11201. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11202. }
  11203. template <typename batch_rule_t, batch_rule_t batch_rule>
  11204. ::std::vector<at::Tensor> vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  11205. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11206. auto maybe_layer = maybeCurrentDynamicLayer();
  11207. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11208. int64_t cur_level = maybe_layer->layerId();
  11209. if (!isBatchedAtLevel(self, cur_level)) {
  11210. return at::_ops::vsplit_int::call(self, sections);
  11211. }
  11212. Tensor self_value;
  11213. optional<int64_t> self_bdim;
  11214. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11215. auto results = batch_rule(self_value, self_bdim, sections);
  11216. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11217. }
  11218. template <typename batch_rule_t, batch_rule_t batch_rule>
  11219. ::std::vector<at::Tensor> vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  11220. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11221. auto maybe_layer = maybeCurrentDynamicLayer();
  11222. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11223. int64_t cur_level = maybe_layer->layerId();
  11224. if (!isBatchedAtLevel(self, cur_level)) {
  11225. return at::_ops::vsplit_array::call(self, indices);
  11226. }
  11227. Tensor self_value;
  11228. optional<int64_t> self_bdim;
  11229. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11230. auto results = batch_rule(self_value, self_bdim, indices);
  11231. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11232. }
  11233. template <typename batch_rule_t, batch_rule_t batch_rule>
  11234. ::std::vector<at::Tensor> dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  11235. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11236. auto maybe_layer = maybeCurrentDynamicLayer();
  11237. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11238. int64_t cur_level = maybe_layer->layerId();
  11239. if (!isBatchedAtLevel(self, cur_level)) {
  11240. return at::_ops::dsplit_int::call(self, sections);
  11241. }
  11242. Tensor self_value;
  11243. optional<int64_t> self_bdim;
  11244. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11245. auto results = batch_rule(self_value, self_bdim, sections);
  11246. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11247. }
  11248. template <typename batch_rule_t, batch_rule_t batch_rule>
  11249. ::std::vector<at::Tensor> dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  11250. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11251. auto maybe_layer = maybeCurrentDynamicLayer();
  11252. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11253. int64_t cur_level = maybe_layer->layerId();
  11254. if (!isBatchedAtLevel(self, cur_level)) {
  11255. return at::_ops::dsplit_array::call(self, indices);
  11256. }
  11257. Tensor self_value;
  11258. optional<int64_t> self_bdim;
  11259. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11260. auto results = batch_rule(self_value, self_bdim, indices);
  11261. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  11262. }
  11263. template <typename batch_rule_t, batch_rule_t batch_rule>
  11264. at::Tensor squeeze_generated_plumbing(const at::Tensor & self) {
  11265. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11266. auto maybe_layer = maybeCurrentDynamicLayer();
  11267. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11268. int64_t cur_level = maybe_layer->layerId();
  11269. if (!isBatchedAtLevel(self, cur_level)) {
  11270. return at::_ops::squeeze::call(self);
  11271. }
  11272. Tensor self_value;
  11273. optional<int64_t> self_bdim;
  11274. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11275. auto results = batch_rule(self_value, self_bdim);
  11276. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11277. }
  11278. template <typename batch_rule_t, batch_rule_t batch_rule>
  11279. at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
  11280. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11281. auto maybe_layer = maybeCurrentDynamicLayer();
  11282. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11283. int64_t cur_level = maybe_layer->layerId();
  11284. if (!isBatchedAtLevel(self, cur_level)) {
  11285. return at::_ops::squeeze_dim::call(self, dim);
  11286. }
  11287. Tensor self_value;
  11288. optional<int64_t> self_bdim;
  11289. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11290. auto results = batch_rule(self_value, self_bdim, dim);
  11291. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11292. }
  11293. template <typename batch_rule_t, batch_rule_t batch_rule>
  11294. at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  11295. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11296. auto maybe_layer = maybeCurrentDynamicLayer();
  11297. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11298. int64_t cur_level = maybe_layer->layerId();
  11299. if (!isBatchedAtLevel(self, cur_level)) {
  11300. return at::_ops::squeeze_dimname::call(self, dim);
  11301. }
  11302. Tensor self_value;
  11303. optional<int64_t> self_bdim;
  11304. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11305. auto results = batch_rule(self_value, self_bdim, dim);
  11306. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11307. }
  11308. template <typename batch_rule_t, batch_rule_t batch_rule>
  11309. at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
  11310. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11311. auto maybe_layer = maybeCurrentDynamicLayer();
  11312. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11313. int64_t cur_level = maybe_layer->layerId();
  11314. if (!isBatchedAtLevel(self, cur_level)) {
  11315. return at::_ops::squeeze_dims::call(self, dim);
  11316. }
  11317. Tensor self_value;
  11318. optional<int64_t> self_bdim;
  11319. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11320. auto results = batch_rule(self_value, self_bdim, dim);
  11321. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11322. }
  11323. template <typename batch_rule_t, batch_rule_t batch_rule>
  11324. at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  11325. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11326. auto maybe_layer = maybeCurrentDynamicLayer();
  11327. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11328. int64_t cur_level = maybe_layer->layerId();
  11329. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
  11330. return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha);
  11331. }
  11332. Tensor self_value;
  11333. optional<int64_t> self_bdim;
  11334. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11335. Tensor mat1_value;
  11336. optional<int64_t> mat1_bdim;
  11337. std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  11338. Tensor mat2_value;
  11339. optional<int64_t> mat2_bdim;
  11340. std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  11341. auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
  11342. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11343. }
  11344. template <typename batch_rule_t, batch_rule_t batch_rule>
  11345. at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
  11346. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11347. auto maybe_layer = maybeCurrentDynamicLayer();
  11348. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11349. int64_t cur_level = maybe_layer->layerId();
  11350. if (!isBatchedAtLevel(tensors, cur_level)) {
  11351. return at::_ops::stack::call(tensors, dim);
  11352. }
  11353. auto results = batch_rule(tensors, dim);
  11354. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11355. }
  11356. template <typename batch_rule_t, batch_rule_t batch_rule>
  11357. at::Tensor _stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
  11358. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11359. auto maybe_layer = maybeCurrentDynamicLayer();
  11360. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11361. int64_t cur_level = maybe_layer->layerId();
  11362. if (!isBatchedAtLevel(tensors, cur_level)) {
  11363. return at::_ops::_stack::call(tensors, dim);
  11364. }
  11365. auto results = batch_rule(tensors, dim);
  11366. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11367. }
  11368. template <typename batch_rule_t, batch_rule_t batch_rule>
  11369. at::Tensor hstack_generated_plumbing(at::TensorList tensors) {
  11370. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11371. auto maybe_layer = maybeCurrentDynamicLayer();
  11372. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11373. int64_t cur_level = maybe_layer->layerId();
  11374. if (!isBatchedAtLevel(tensors, cur_level)) {
  11375. return at::_ops::hstack::call(tensors);
  11376. }
  11377. auto results = batch_rule(tensors);
  11378. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11379. }
  11380. template <typename batch_rule_t, batch_rule_t batch_rule>
  11381. at::Tensor vstack_generated_plumbing(at::TensorList tensors) {
  11382. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11383. auto maybe_layer = maybeCurrentDynamicLayer();
  11384. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11385. int64_t cur_level = maybe_layer->layerId();
  11386. if (!isBatchedAtLevel(tensors, cur_level)) {
  11387. return at::_ops::vstack::call(tensors);
  11388. }
  11389. auto results = batch_rule(tensors);
  11390. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11391. }
  11392. template <typename batch_rule_t, batch_rule_t batch_rule>
  11393. at::Tensor dstack_generated_plumbing(at::TensorList tensors) {
  11394. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11395. auto maybe_layer = maybeCurrentDynamicLayer();
  11396. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11397. int64_t cur_level = maybe_layer->layerId();
  11398. if (!isBatchedAtLevel(tensors, cur_level)) {
  11399. return at::_ops::dstack::call(tensors);
  11400. }
  11401. auto results = batch_rule(tensors);
  11402. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11403. }
  11404. template <typename batch_rule_t, batch_rule_t batch_rule>
  11405. at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
  11406. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11407. auto maybe_layer = maybeCurrentDynamicLayer();
  11408. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11409. int64_t cur_level = maybe_layer->layerId();
  11410. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
  11411. return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
  11412. }
  11413. Tensor self_value;
  11414. optional<int64_t> self_bdim;
  11415. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11416. optional<Tensor> window_value;
  11417. optional<int64_t> window_bdim;
  11418. if (window) {
  11419. std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
  11420. }
  11421. auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex);
  11422. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11423. }
  11424. template <typename batch_rule_t, batch_rule_t batch_rule>
  11425. at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
  11426. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11427. auto maybe_layer = maybeCurrentDynamicLayer();
  11428. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11429. int64_t cur_level = maybe_layer->layerId();
  11430. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
  11431. return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
  11432. }
  11433. Tensor self_value;
  11434. optional<int64_t> self_bdim;
  11435. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11436. optional<Tensor> window_value;
  11437. optional<int64_t> window_bdim;
  11438. if (window) {
  11439. std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
  11440. }
  11441. auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex);
  11442. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11443. }
  11444. template <typename batch_rule_t, batch_rule_t batch_rule>
  11445. at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
  11446. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11447. auto maybe_layer = maybeCurrentDynamicLayer();
  11448. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11449. int64_t cur_level = maybe_layer->layerId();
  11450. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
  11451. return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
  11452. }
  11453. Tensor self_value;
  11454. optional<int64_t> self_bdim;
  11455. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11456. optional<Tensor> window_value;
  11457. optional<int64_t> window_bdim;
  11458. if (window) {
  11459. std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
  11460. }
  11461. auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex);
  11462. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11463. }
  11464. template <typename batch_rule_t, batch_rule_t batch_rule>
  11465. at::Tensor sum_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  11466. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11467. auto maybe_layer = maybeCurrentDynamicLayer();
  11468. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11469. int64_t cur_level = maybe_layer->layerId();
  11470. if (!isBatchedAtLevel(self, cur_level)) {
  11471. return at::_ops::sum::call(self, dtype);
  11472. }
  11473. Tensor self_value;
  11474. optional<int64_t> self_bdim;
  11475. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11476. auto results = batch_rule(self_value, self_bdim, dtype);
  11477. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11478. }
  11479. template <typename batch_rule_t, batch_rule_t batch_rule>
  11480. at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  11481. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11482. auto maybe_layer = maybeCurrentDynamicLayer();
  11483. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11484. int64_t cur_level = maybe_layer->layerId();
  11485. if (!isBatchedAtLevel(self, cur_level)) {
  11486. return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
  11487. }
  11488. Tensor self_value;
  11489. optional<int64_t> self_bdim;
  11490. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11491. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  11492. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11493. }
  11494. template <typename batch_rule_t, batch_rule_t batch_rule>
  11495. at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  11496. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11497. auto maybe_layer = maybeCurrentDynamicLayer();
  11498. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11499. int64_t cur_level = maybe_layer->layerId();
  11500. if (!isBatchedAtLevel(self, cur_level)) {
  11501. return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
  11502. }
  11503. Tensor self_value;
  11504. optional<int64_t> self_bdim;
  11505. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11506. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  11507. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11508. }
  11509. template <typename batch_rule_t, batch_rule_t batch_rule>
  11510. at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
  11511. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11512. auto maybe_layer = maybeCurrentDynamicLayer();
  11513. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11514. int64_t cur_level = maybe_layer->layerId();
  11515. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  11516. return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
  11517. }
  11518. Tensor grad_value;
  11519. optional<int64_t> grad_bdim;
  11520. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  11521. Tensor self_value;
  11522. optional<int64_t> self_bdim;
  11523. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11524. auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim);
  11525. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11526. }
  11527. template <typename batch_rule_t, batch_rule_t batch_rule>
  11528. at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  11529. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11530. auto maybe_layer = maybeCurrentDynamicLayer();
  11531. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11532. int64_t cur_level = maybe_layer->layerId();
  11533. if (!isBatchedAtLevel(self, cur_level)) {
  11534. return at::_ops::nansum::call(self, dim, keepdim, dtype);
  11535. }
  11536. Tensor self_value;
  11537. optional<int64_t> self_bdim;
  11538. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11539. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  11540. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11541. }
  11542. template <typename batch_rule_t, batch_rule_t batch_rule>
  11543. at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
  11544. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11545. auto maybe_layer = maybeCurrentDynamicLayer();
  11546. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11547. int64_t cur_level = maybe_layer->layerId();
  11548. if (!isBatchedAtLevel(self, cur_level)) {
  11549. return at::_ops::sum_to_size::call(self, size);
  11550. }
  11551. Tensor self_value;
  11552. optional<int64_t> self_bdim;
  11553. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11554. auto results = batch_rule(self_value, self_bdim, size);
  11555. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11556. }
  11557. template <typename batch_rule_t, batch_rule_t batch_rule>
  11558. at::Tensor sqrt_generated_plumbing(const at::Tensor & self) {
  11559. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11560. auto maybe_layer = maybeCurrentDynamicLayer();
  11561. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11562. int64_t cur_level = maybe_layer->layerId();
  11563. if (!isBatchedAtLevel(self, cur_level)) {
  11564. return at::_ops::sqrt::call(self);
  11565. }
  11566. Tensor self_value;
  11567. optional<int64_t> self_bdim;
  11568. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11569. auto results = batch_rule(self_value, self_bdim);
  11570. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11571. }
  11572. template <typename batch_rule_t, batch_rule_t batch_rule>
  11573. at::Tensor & sqrt__generated_plumbing(at::Tensor & self) {
  11574. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11575. auto maybe_layer = maybeCurrentDynamicLayer();
  11576. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  11577. int64_t cur_level = maybe_layer->layerId();
  11578. if (!isBatchedAtLevel(self, cur_level)) {
  11579. return at::_ops::sqrt_::call(self);
  11580. }
  11581. Tensor self_value;
  11582. optional<int64_t> self_bdim;
  11583. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11584. batch_rule(self_value, self_bdim);
  11585. return self;
  11586. }
  11587. template <typename batch_rule_t, batch_rule_t batch_rule>
  11588. at::Tensor square_generated_plumbing(const at::Tensor & self) {
  11589. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11590. auto maybe_layer = maybeCurrentDynamicLayer();
  11591. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11592. int64_t cur_level = maybe_layer->layerId();
  11593. if (!isBatchedAtLevel(self, cur_level)) {
  11594. return at::_ops::square::call(self);
  11595. }
  11596. Tensor self_value;
  11597. optional<int64_t> self_bdim;
  11598. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11599. auto results = batch_rule(self_value, self_bdim);
  11600. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11601. }
  11602. template <typename batch_rule_t, batch_rule_t batch_rule>
  11603. at::Tensor & square__generated_plumbing(at::Tensor & self) {
  11604. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11605. auto maybe_layer = maybeCurrentDynamicLayer();
  11606. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  11607. int64_t cur_level = maybe_layer->layerId();
  11608. if (!isBatchedAtLevel(self, cur_level)) {
  11609. return at::_ops::square_::call(self);
  11610. }
  11611. Tensor self_value;
  11612. optional<int64_t> self_bdim;
  11613. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11614. batch_rule(self_value, self_bdim);
  11615. return self;
  11616. }
  11617. template <typename batch_rule_t, batch_rule_t batch_rule>
  11618. at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) {
  11619. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11620. auto maybe_layer = maybeCurrentDynamicLayer();
  11621. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11622. int64_t cur_level = maybe_layer->layerId();
  11623. if (!isBatchedAtLevel(self, cur_level)) {
  11624. return at::_ops::std::call(self, unbiased);
  11625. }
  11626. Tensor self_value;
  11627. optional<int64_t> self_bdim;
  11628. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11629. auto results = batch_rule(self_value, self_bdim, unbiased);
  11630. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11631. }
  11632. template <typename batch_rule_t, batch_rule_t batch_rule>
  11633. at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  11634. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11635. auto maybe_layer = maybeCurrentDynamicLayer();
  11636. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11637. int64_t cur_level = maybe_layer->layerId();
  11638. if (!isBatchedAtLevel(self, cur_level)) {
  11639. return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
  11640. }
  11641. Tensor self_value;
  11642. optional<int64_t> self_bdim;
  11643. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11644. auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  11645. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11646. }
  11647. template <typename batch_rule_t, batch_rule_t batch_rule>
  11648. at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  11649. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11650. auto maybe_layer = maybeCurrentDynamicLayer();
  11651. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11652. int64_t cur_level = maybe_layer->layerId();
  11653. if (!isBatchedAtLevel(self, cur_level)) {
  11654. return at::_ops::std_correction::call(self, dim, correction, keepdim);
  11655. }
  11656. Tensor self_value;
  11657. optional<int64_t> self_bdim;
  11658. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11659. auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  11660. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11661. }
  11662. template <typename batch_rule_t, batch_rule_t batch_rule>
  11663. ::std::tuple<at::Tensor,at::Tensor> std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
  11664. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11665. auto maybe_layer = maybeCurrentDynamicLayer();
  11666. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11667. int64_t cur_level = maybe_layer->layerId();
  11668. if (!isBatchedAtLevel(self, cur_level)) {
  11669. return at::_ops::std_mean::call(self, unbiased);
  11670. }
  11671. Tensor self_value;
  11672. optional<int64_t> self_bdim;
  11673. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11674. auto results = batch_rule(self_value, self_bdim, unbiased);
  11675. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  11676. }
  11677. template <typename batch_rule_t, batch_rule_t batch_rule>
  11678. ::std::tuple<at::Tensor,at::Tensor> std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  11679. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11680. auto maybe_layer = maybeCurrentDynamicLayer();
  11681. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11682. int64_t cur_level = maybe_layer->layerId();
  11683. if (!isBatchedAtLevel(self, cur_level)) {
  11684. return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
  11685. }
  11686. Tensor self_value;
  11687. optional<int64_t> self_bdim;
  11688. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11689. auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  11690. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  11691. }
  11692. template <typename batch_rule_t, batch_rule_t batch_rule>
  11693. ::std::tuple<at::Tensor,at::Tensor> std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  11694. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11695. auto maybe_layer = maybeCurrentDynamicLayer();
  11696. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11697. int64_t cur_level = maybe_layer->layerId();
  11698. if (!isBatchedAtLevel(self, cur_level)) {
  11699. return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
  11700. }
  11701. Tensor self_value;
  11702. optional<int64_t> self_bdim;
  11703. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11704. auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  11705. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  11706. }
  11707. template <typename batch_rule_t, batch_rule_t batch_rule>
  11708. ::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  11709. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11710. auto maybe_layer = maybeCurrentDynamicLayer();
  11711. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11712. int64_t cur_level = maybe_layer->layerId();
  11713. if (!isBatchedAtLevel(self, cur_level)) {
  11714. return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
  11715. }
  11716. Tensor self_value;
  11717. optional<int64_t> self_bdim;
  11718. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11719. auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  11720. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  11721. }
  11722. template <typename batch_rule_t, batch_rule_t batch_rule>
  11723. ::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  11724. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11725. auto maybe_layer = maybeCurrentDynamicLayer();
  11726. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11727. int64_t cur_level = maybe_layer->layerId();
  11728. if (!isBatchedAtLevel(self, cur_level)) {
  11729. return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
  11730. }
  11731. Tensor self_value;
  11732. optional<int64_t> self_bdim;
  11733. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11734. auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  11735. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  11736. }
  11737. template <typename batch_rule_t, batch_rule_t batch_rule>
  11738. at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  11739. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11740. auto maybe_layer = maybeCurrentDynamicLayer();
  11741. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11742. int64_t cur_level = maybe_layer->layerId();
  11743. if (!isBatchedAtLevel(self, cur_level)) {
  11744. return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
  11745. }
  11746. Tensor self_value;
  11747. optional<int64_t> self_bdim;
  11748. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11749. auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  11750. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11751. }
  11752. template <typename batch_rule_t, batch_rule_t batch_rule>
  11753. at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  11754. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11755. auto maybe_layer = maybeCurrentDynamicLayer();
  11756. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11757. int64_t cur_level = maybe_layer->layerId();
  11758. if (!isBatchedAtLevel(self, cur_level)) {
  11759. return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
  11760. }
  11761. Tensor self_value;
  11762. optional<int64_t> self_bdim;
  11763. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11764. auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  11765. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11766. }
  11767. template <typename batch_rule_t, batch_rule_t batch_rule>
  11768. at::Tensor prod_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  11769. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11770. auto maybe_layer = maybeCurrentDynamicLayer();
  11771. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11772. int64_t cur_level = maybe_layer->layerId();
  11773. if (!isBatchedAtLevel(self, cur_level)) {
  11774. return at::_ops::prod::call(self, dtype);
  11775. }
  11776. Tensor self_value;
  11777. optional<int64_t> self_bdim;
  11778. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11779. auto results = batch_rule(self_value, self_bdim, dtype);
  11780. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11781. }
  11782. template <typename batch_rule_t, batch_rule_t batch_rule>
  11783. at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  11784. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11785. auto maybe_layer = maybeCurrentDynamicLayer();
  11786. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11787. int64_t cur_level = maybe_layer->layerId();
  11788. if (!isBatchedAtLevel(self, cur_level)) {
  11789. return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
  11790. }
  11791. Tensor self_value;
  11792. optional<int64_t> self_bdim;
  11793. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11794. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  11795. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11796. }
  11797. template <typename batch_rule_t, batch_rule_t batch_rule>
  11798. at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  11799. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11800. auto maybe_layer = maybeCurrentDynamicLayer();
  11801. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11802. int64_t cur_level = maybe_layer->layerId();
  11803. if (!isBatchedAtLevel(self, cur_level)) {
  11804. return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
  11805. }
  11806. Tensor self_value;
  11807. optional<int64_t> self_bdim;
  11808. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11809. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  11810. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11811. }
  11812. template <typename batch_rule_t, batch_rule_t batch_rule>
  11813. at::Tensor t_generated_plumbing(const at::Tensor & self) {
  11814. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11815. auto maybe_layer = maybeCurrentDynamicLayer();
  11816. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11817. int64_t cur_level = maybe_layer->layerId();
  11818. if (!isBatchedAtLevel(self, cur_level)) {
  11819. return at::_ops::t::call(self);
  11820. }
  11821. Tensor self_value;
  11822. optional<int64_t> self_bdim;
  11823. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11824. auto results = batch_rule(self_value, self_bdim);
  11825. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11826. }
  11827. template <typename batch_rule_t, batch_rule_t batch_rule>
  11828. at::Tensor tan_generated_plumbing(const at::Tensor & self) {
  11829. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11830. auto maybe_layer = maybeCurrentDynamicLayer();
  11831. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11832. int64_t cur_level = maybe_layer->layerId();
  11833. if (!isBatchedAtLevel(self, cur_level)) {
  11834. return at::_ops::tan::call(self);
  11835. }
  11836. Tensor self_value;
  11837. optional<int64_t> self_bdim;
  11838. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11839. auto results = batch_rule(self_value, self_bdim);
  11840. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11841. }
  11842. template <typename batch_rule_t, batch_rule_t batch_rule>
  11843. at::Tensor & tan__generated_plumbing(at::Tensor & self) {
  11844. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11845. auto maybe_layer = maybeCurrentDynamicLayer();
  11846. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  11847. int64_t cur_level = maybe_layer->layerId();
  11848. if (!isBatchedAtLevel(self, cur_level)) {
  11849. return at::_ops::tan_::call(self);
  11850. }
  11851. Tensor self_value;
  11852. optional<int64_t> self_bdim;
  11853. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11854. batch_rule(self_value, self_bdim);
  11855. return self;
  11856. }
  11857. template <typename batch_rule_t, batch_rule_t batch_rule>
  11858. at::Tensor tanh_generated_plumbing(const at::Tensor & self) {
  11859. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11860. auto maybe_layer = maybeCurrentDynamicLayer();
  11861. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11862. int64_t cur_level = maybe_layer->layerId();
  11863. if (!isBatchedAtLevel(self, cur_level)) {
  11864. return at::_ops::tanh::call(self);
  11865. }
  11866. Tensor self_value;
  11867. optional<int64_t> self_bdim;
  11868. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11869. auto results = batch_rule(self_value, self_bdim);
  11870. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11871. }
  11872. template <typename batch_rule_t, batch_rule_t batch_rule>
  11873. at::Tensor & tanh__generated_plumbing(at::Tensor & self) {
  11874. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11875. auto maybe_layer = maybeCurrentDynamicLayer();
  11876. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  11877. int64_t cur_level = maybe_layer->layerId();
  11878. if (!isBatchedAtLevel(self, cur_level)) {
  11879. return at::_ops::tanh_::call(self);
  11880. }
  11881. Tensor self_value;
  11882. optional<int64_t> self_bdim;
  11883. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11884. batch_rule(self_value, self_bdim);
  11885. return self;
  11886. }
  11887. template <typename batch_rule_t, batch_rule_t batch_rule>
  11888. at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
  11889. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11890. auto maybe_layer = maybeCurrentDynamicLayer();
  11891. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11892. int64_t cur_level = maybe_layer->layerId();
  11893. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  11894. return at::_ops::tensordot::call(self, other, dims_self, dims_other);
  11895. }
  11896. Tensor self_value;
  11897. optional<int64_t> self_bdim;
  11898. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11899. Tensor other_value;
  11900. optional<int64_t> other_bdim;
  11901. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  11902. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other);
  11903. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11904. }
  11905. template <typename batch_rule_t, batch_rule_t batch_rule>
  11906. at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  11907. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11908. auto maybe_layer = maybeCurrentDynamicLayer();
  11909. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11910. int64_t cur_level = maybe_layer->layerId();
  11911. if (!isBatchedAtLevel(self, cur_level)) {
  11912. return at::_ops::threshold::call(self, threshold, value);
  11913. }
  11914. Tensor self_value;
  11915. optional<int64_t> self_bdim;
  11916. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11917. auto results = batch_rule(self_value, self_bdim, threshold, value);
  11918. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11919. }
  11920. template <typename batch_rule_t, batch_rule_t batch_rule>
  11921. at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
  11922. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11923. auto maybe_layer = maybeCurrentDynamicLayer();
  11924. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  11925. int64_t cur_level = maybe_layer->layerId();
  11926. if (!isBatchedAtLevel(self, cur_level)) {
  11927. return at::_ops::threshold_::call(self, threshold, value);
  11928. }
  11929. Tensor self_value;
  11930. optional<int64_t> self_bdim;
  11931. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11932. batch_rule(self_value, self_bdim, threshold, value);
  11933. return self;
  11934. }
  11935. template <typename batch_rule_t, batch_rule_t batch_rule>
  11936. at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
  11937. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11938. auto maybe_layer = maybeCurrentDynamicLayer();
  11939. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11940. int64_t cur_level = maybe_layer->layerId();
  11941. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  11942. return at::_ops::threshold_backward::call(grad_output, self, threshold);
  11943. }
  11944. Tensor grad_output_value;
  11945. optional<int64_t> grad_output_bdim;
  11946. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  11947. Tensor self_value;
  11948. optional<int64_t> self_bdim;
  11949. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11950. auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold);
  11951. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11952. }
  11953. template <typename batch_rule_t, batch_rule_t batch_rule>
  11954. at::Tensor tile_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
  11955. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11956. auto maybe_layer = maybeCurrentDynamicLayer();
  11957. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11958. int64_t cur_level = maybe_layer->layerId();
  11959. if (!isBatchedAtLevel(self, cur_level)) {
  11960. return at::_ops::tile::call(self, dims);
  11961. }
  11962. Tensor self_value;
  11963. optional<int64_t> self_bdim;
  11964. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11965. auto results = batch_rule(self_value, self_bdim, dims);
  11966. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11967. }
  11968. template <typename batch_rule_t, batch_rule_t batch_rule>
  11969. at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  11970. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11971. auto maybe_layer = maybeCurrentDynamicLayer();
  11972. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11973. int64_t cur_level = maybe_layer->layerId();
  11974. if (!isBatchedAtLevel(self, cur_level)) {
  11975. return at::_ops::transpose_int::call(self, dim0, dim1);
  11976. }
  11977. Tensor self_value;
  11978. optional<int64_t> self_bdim;
  11979. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11980. auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  11981. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11982. }
  11983. template <typename batch_rule_t, batch_rule_t batch_rule>
  11984. at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
  11985. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  11986. auto maybe_layer = maybeCurrentDynamicLayer();
  11987. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  11988. int64_t cur_level = maybe_layer->layerId();
  11989. if (!isBatchedAtLevel(self, cur_level)) {
  11990. return at::_ops::transpose_Dimname::call(self, dim0, dim1);
  11991. }
  11992. Tensor self_value;
  11993. optional<int64_t> self_bdim;
  11994. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  11995. auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  11996. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  11997. }
  11998. template <typename batch_rule_t, batch_rule_t batch_rule>
  11999. at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  12000. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12001. auto maybe_layer = maybeCurrentDynamicLayer();
  12002. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12003. int64_t cur_level = maybe_layer->layerId();
  12004. if (!isBatchedAtLevel(self, cur_level)) {
  12005. return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
  12006. }
  12007. Tensor self_value;
  12008. optional<int64_t> self_bdim;
  12009. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12010. auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  12011. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12012. }
  12013. template <typename batch_rule_t, batch_rule_t batch_rule>
  12014. at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) {
  12015. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12016. auto maybe_layer = maybeCurrentDynamicLayer();
  12017. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  12018. int64_t cur_level = maybe_layer->layerId();
  12019. if (!isBatchedAtLevel(self, cur_level)) {
  12020. return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
  12021. }
  12022. Tensor self_value;
  12023. optional<int64_t> self_bdim;
  12024. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12025. batch_rule(self_value, self_bdim, dim0, dim1);
  12026. return self;
  12027. }
  12028. template <typename batch_rule_t, batch_rule_t batch_rule>
  12029. at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) {
  12030. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12031. auto maybe_layer = maybeCurrentDynamicLayer();
  12032. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12033. int64_t cur_level = maybe_layer->layerId();
  12034. if (!isBatchedAtLevel(self, cur_level)) {
  12035. return at::_ops::one_hot::call(self, num_classes);
  12036. }
  12037. Tensor self_value;
  12038. optional<int64_t> self_bdim;
  12039. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12040. auto results = batch_rule(self_value, self_bdim, num_classes);
  12041. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12042. }
  12043. template <typename batch_rule_t, batch_rule_t batch_rule>
  12044. at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
  12045. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12046. auto maybe_layer = maybeCurrentDynamicLayer();
  12047. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12048. int64_t cur_level = maybe_layer->layerId();
  12049. if (!isBatchedAtLevel(self, cur_level)) {
  12050. return at::_ops::flip::call(self, dims);
  12051. }
  12052. Tensor self_value;
  12053. optional<int64_t> self_bdim;
  12054. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12055. auto results = batch_rule(self_value, self_bdim, dims);
  12056. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12057. }
  12058. template <typename batch_rule_t, batch_rule_t batch_rule>
  12059. at::Tensor fliplr_generated_plumbing(const at::Tensor & self) {
  12060. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12061. auto maybe_layer = maybeCurrentDynamicLayer();
  12062. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12063. int64_t cur_level = maybe_layer->layerId();
  12064. if (!isBatchedAtLevel(self, cur_level)) {
  12065. return at::_ops::fliplr::call(self);
  12066. }
  12067. Tensor self_value;
  12068. optional<int64_t> self_bdim;
  12069. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12070. auto results = batch_rule(self_value, self_bdim);
  12071. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12072. }
  12073. template <typename batch_rule_t, batch_rule_t batch_rule>
  12074. at::Tensor flipud_generated_plumbing(const at::Tensor & self) {
  12075. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12076. auto maybe_layer = maybeCurrentDynamicLayer();
  12077. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12078. int64_t cur_level = maybe_layer->layerId();
  12079. if (!isBatchedAtLevel(self, cur_level)) {
  12080. return at::_ops::flipud::call(self);
  12081. }
  12082. Tensor self_value;
  12083. optional<int64_t> self_bdim;
  12084. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12085. auto results = batch_rule(self_value, self_bdim);
  12086. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12087. }
  12088. template <typename batch_rule_t, batch_rule_t batch_rule>
  12089. at::Tensor roll_generated_plumbing(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
  12090. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12091. auto maybe_layer = maybeCurrentDynamicLayer();
  12092. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12093. int64_t cur_level = maybe_layer->layerId();
  12094. if (!isBatchedAtLevel(self, cur_level)) {
  12095. return at::_ops::roll::call(self, shifts, dims);
  12096. }
  12097. Tensor self_value;
  12098. optional<int64_t> self_bdim;
  12099. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12100. auto results = batch_rule(self_value, self_bdim, shifts, dims);
  12101. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12102. }
  12103. template <typename batch_rule_t, batch_rule_t batch_rule>
  12104. at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
  12105. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12106. auto maybe_layer = maybeCurrentDynamicLayer();
  12107. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12108. int64_t cur_level = maybe_layer->layerId();
  12109. if (!isBatchedAtLevel(self, cur_level)) {
  12110. return at::_ops::rot90::call(self, k, dims);
  12111. }
  12112. Tensor self_value;
  12113. optional<int64_t> self_bdim;
  12114. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12115. auto results = batch_rule(self_value, self_bdim, k, dims);
  12116. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12117. }
  12118. template <typename batch_rule_t, batch_rule_t batch_rule>
  12119. at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  12120. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12121. auto maybe_layer = maybeCurrentDynamicLayer();
  12122. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12123. int64_t cur_level = maybe_layer->layerId();
  12124. if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
  12125. return at::_ops::trapezoid_x::call(y, x, dim);
  12126. }
  12127. Tensor y_value;
  12128. optional<int64_t> y_bdim;
  12129. std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  12130. Tensor x_value;
  12131. optional<int64_t> x_bdim;
  12132. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  12133. auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
  12134. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12135. }
  12136. template <typename batch_rule_t, batch_rule_t batch_rule>
  12137. at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
  12138. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12139. auto maybe_layer = maybeCurrentDynamicLayer();
  12140. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12141. int64_t cur_level = maybe_layer->layerId();
  12142. if (!isBatchedAtLevel(y, cur_level)) {
  12143. return at::_ops::trapezoid_dx::call(y, dx, dim);
  12144. }
  12145. Tensor y_value;
  12146. optional<int64_t> y_bdim;
  12147. std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  12148. auto results = batch_rule(y_value, y_bdim, dx, dim);
  12149. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12150. }
  12151. template <typename batch_rule_t, batch_rule_t batch_rule>
  12152. at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
  12153. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12154. auto maybe_layer = maybeCurrentDynamicLayer();
  12155. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12156. int64_t cur_level = maybe_layer->layerId();
  12157. if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
  12158. return at::_ops::trapz_x::call(y, x, dim);
  12159. }
  12160. Tensor y_value;
  12161. optional<int64_t> y_bdim;
  12162. std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  12163. Tensor x_value;
  12164. optional<int64_t> x_bdim;
  12165. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  12166. auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
  12167. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12168. }
  12169. template <typename batch_rule_t, batch_rule_t batch_rule>
  12170. at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) {
  12171. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12172. auto maybe_layer = maybeCurrentDynamicLayer();
  12173. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12174. int64_t cur_level = maybe_layer->layerId();
  12175. if (!isBatchedAtLevel(y, cur_level)) {
  12176. return at::_ops::trapz_dx::call(y, dx, dim);
  12177. }
  12178. Tensor y_value;
  12179. optional<int64_t> y_bdim;
  12180. std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  12181. auto results = batch_rule(y_value, y_bdim, dx, dim);
  12182. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12183. }
  12184. template <typename batch_rule_t, batch_rule_t batch_rule>
  12185. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
  12186. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12187. auto maybe_layer = maybeCurrentDynamicLayer();
  12188. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12189. int64_t cur_level = maybe_layer->layerId();
  12190. if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) {
  12191. return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads);
  12192. }
  12193. Tensor qkv_value;
  12194. optional<int64_t> qkv_bdim;
  12195. std::tie(qkv_value, qkv_bdim) = unwrapTensorAtLevel(qkv, cur_level);
  12196. Tensor qkv_bias_value;
  12197. optional<int64_t> qkv_bias_bdim;
  12198. std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  12199. auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads);
  12200. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  12201. }
  12202. template <typename batch_rule_t, batch_rule_t batch_rule>
  12203. at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
  12204. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12205. auto maybe_layer = maybeCurrentDynamicLayer();
  12206. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12207. int64_t cur_level = maybe_layer->layerId();
  12208. if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  12209. return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check);
  12210. }
  12211. Tensor t_value;
  12212. optional<int64_t> t_bdim;
  12213. std::tie(t_value, t_bdim) = unwrapTensorAtLevel(t, cur_level);
  12214. Tensor mask_value;
  12215. optional<int64_t> mask_bdim;
  12216. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  12217. auto results = batch_rule(t_value, t_bdim, mask_value, mask_bdim, mask_check);
  12218. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12219. }
  12220. template <typename batch_rule_t, batch_rule_t batch_rule>
  12221. at::Tensor _nested_from_padded_generated_plumbing(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
  12222. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12223. auto maybe_layer = maybeCurrentDynamicLayer();
  12224. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12225. int64_t cur_level = maybe_layer->layerId();
  12226. if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) {
  12227. return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
  12228. }
  12229. Tensor padded_value;
  12230. optional<int64_t> padded_bdim;
  12231. std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
  12232. Tensor cpu_nested_shape_example_value;
  12233. optional<int64_t> cpu_nested_shape_example_bdim;
  12234. std::tie(cpu_nested_shape_example_value, cpu_nested_shape_example_bdim) = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level);
  12235. auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213);
  12236. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12237. }
  12238. template <typename batch_rule_t, batch_rule_t batch_rule>
  12239. at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) {
  12240. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12241. auto maybe_layer = maybeCurrentDynamicLayer();
  12242. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12243. int64_t cur_level = maybe_layer->layerId();
  12244. if (!isBatchedAtLevel(self, cur_level)) {
  12245. return at::_ops::_nested_tensor_size::call(self);
  12246. }
  12247. Tensor self_value;
  12248. optional<int64_t> self_bdim;
  12249. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12250. auto results = batch_rule(self_value, self_bdim);
  12251. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12252. }
  12253. template <typename batch_rule_t, batch_rule_t batch_rule>
  12254. at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) {
  12255. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12256. auto maybe_layer = maybeCurrentDynamicLayer();
  12257. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12258. int64_t cur_level = maybe_layer->layerId();
  12259. if (!isBatchedAtLevel(self, cur_level)) {
  12260. return at::_ops::_nested_tensor_strides::call(self);
  12261. }
  12262. Tensor self_value;
  12263. optional<int64_t> self_bdim;
  12264. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12265. auto results = batch_rule(self_value, self_bdim);
  12266. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12267. }
  12268. template <typename batch_rule_t, batch_rule_t batch_rule>
  12269. at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) {
  12270. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12271. auto maybe_layer = maybeCurrentDynamicLayer();
  12272. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12273. int64_t cur_level = maybe_layer->layerId();
  12274. if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) {
  12275. return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
  12276. }
  12277. Tensor padded_value;
  12278. optional<int64_t> padded_bdim;
  12279. std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
  12280. Tensor nt_example_value;
  12281. optional<int64_t> nt_example_bdim;
  12282. std::tie(nt_example_value, nt_example_bdim) = unwrapTensorAtLevel(nt_example, cur_level);
  12283. auto results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim);
  12284. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12285. }
  12286. template <typename batch_rule_t, batch_rule_t batch_rule>
  12287. at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
  12288. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12289. auto maybe_layer = maybeCurrentDynamicLayer();
  12290. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12291. int64_t cur_level = maybe_layer->layerId();
  12292. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level)) {
  12293. return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
  12294. }
  12295. Tensor self_value;
  12296. optional<int64_t> self_bdim;
  12297. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12298. Tensor nested_size_value;
  12299. optional<int64_t> nested_size_bdim;
  12300. std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
  12301. Tensor nested_strides_value;
  12302. optional<int64_t> nested_strides_bdim;
  12303. std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
  12304. auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets);
  12305. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12306. }
  12307. template <typename batch_rule_t, batch_rule_t batch_rule>
  12308. at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
  12309. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12310. auto maybe_layer = maybeCurrentDynamicLayer();
  12311. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12312. int64_t cur_level = maybe_layer->layerId();
  12313. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level)) {
  12314. return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
  12315. }
  12316. Tensor self_value;
  12317. optional<int64_t> self_bdim;
  12318. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12319. Tensor nested_size_value;
  12320. optional<int64_t> nested_size_bdim;
  12321. std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
  12322. Tensor nested_strides_value;
  12323. optional<int64_t> nested_strides_bdim;
  12324. std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
  12325. auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets);
  12326. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12327. }
  12328. template <typename batch_rule_t, batch_rule_t batch_rule>
  12329. at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
  12330. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12331. auto maybe_layer = maybeCurrentDynamicLayer();
  12332. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12333. int64_t cur_level = maybe_layer->layerId();
  12334. if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) {
  12335. return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
  12336. }
  12337. Tensor i1_value;
  12338. optional<int64_t> i1_bdim;
  12339. std::tie(i1_value, i1_bdim) = unwrapTensorAtLevel(i1, cur_level);
  12340. Tensor i2_value;
  12341. optional<int64_t> i2_bdim;
  12342. std::tie(i2_value, i2_bdim) = unwrapTensorAtLevel(i2, cur_level);
  12343. Tensor i3_value;
  12344. optional<int64_t> i3_bdim;
  12345. std::tie(i3_value, i3_bdim) = unwrapTensorAtLevel(i3, cur_level);
  12346. auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim);
  12347. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12348. }
  12349. template <typename batch_rule_t, batch_rule_t batch_rule>
  12350. at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
  12351. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12352. auto maybe_layer = maybeCurrentDynamicLayer();
  12353. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12354. int64_t cur_level = maybe_layer->layerId();
  12355. if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) {
  12356. return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction);
  12357. }
  12358. Tensor anchor_value;
  12359. optional<int64_t> anchor_bdim;
  12360. std::tie(anchor_value, anchor_bdim) = unwrapTensorAtLevel(anchor, cur_level);
  12361. Tensor positive_value;
  12362. optional<int64_t> positive_bdim;
  12363. std::tie(positive_value, positive_bdim) = unwrapTensorAtLevel(positive, cur_level);
  12364. Tensor negative_value;
  12365. optional<int64_t> negative_bdim;
  12366. std::tie(negative_value, negative_bdim) = unwrapTensorAtLevel(negative, cur_level);
  12367. auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction);
  12368. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12369. }
  12370. template <typename batch_rule_t, batch_rule_t batch_rule>
  12371. at::Tensor trunc_generated_plumbing(const at::Tensor & self) {
  12372. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12373. auto maybe_layer = maybeCurrentDynamicLayer();
  12374. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12375. int64_t cur_level = maybe_layer->layerId();
  12376. if (!isBatchedAtLevel(self, cur_level)) {
  12377. return at::_ops::trunc::call(self);
  12378. }
  12379. Tensor self_value;
  12380. optional<int64_t> self_bdim;
  12381. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12382. auto results = batch_rule(self_value, self_bdim);
  12383. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12384. }
  12385. template <typename batch_rule_t, batch_rule_t batch_rule>
  12386. at::Tensor & trunc__generated_plumbing(at::Tensor & self) {
  12387. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12388. auto maybe_layer = maybeCurrentDynamicLayer();
  12389. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  12390. int64_t cur_level = maybe_layer->layerId();
  12391. if (!isBatchedAtLevel(self, cur_level)) {
  12392. return at::_ops::trunc_::call(self);
  12393. }
  12394. Tensor self_value;
  12395. optional<int64_t> self_bdim;
  12396. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12397. batch_rule(self_value, self_bdim);
  12398. return self;
  12399. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fix_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fix::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & fix__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fix_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::type_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_unique::call(self, sorted, return_inverse);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sorted, return_inverse);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
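// Editorial note: multi-output wrappers such as _unique assume the batch rule returns the
// (tensor, bdim) pairs interleaved, e.g. for a two-tensor result
//
//   std::tuple<Tensor, optional<int64_t>, Tensor, optional<int64_t>>
//
// and each pair is re-wrapped independently with makeBatched before being packed back into
// the public ::std::tuple<at::Tensor,at::Tensor> return type.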
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_unsafe_view::call(self, size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsqueeze::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::vander::call(x, N, increasing);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, N, increasing);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var::call(self, unbiased);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, unbiased);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_correction::call(self, dim, correction, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_mean::call(self, unbiased);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, unbiased);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::view_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::where_self::call(condition, self, other);
  }
  Tensor condition_value;
  optional<int64_t> condition_bdim;
  std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::where_ScalarSelf::call(condition, self, other);
  }
  Tensor condition_value;
  optional<int64_t> condition_bdim;
  std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::where_ScalarOther::call(condition, self, other);
  }
  Tensor condition_value;
  optional<int64_t> condition_bdim;
  std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(condition, cur_level)) {
    return at::_ops::where_Scalar::call(condition, self, other);
  }
  Tensor condition_value;
  optional<int64_t> condition_bdim;
  std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
  auto results = batch_rule(condition_value, condition_bdim, self, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> where_generated_plumbing(const at::Tensor & condition) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(condition, cur_level)) {
    return at::_ops::where::call(condition);
  }
  Tensor condition_value;
  optional<int64_t> condition_bdim;
  std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
  auto results = batch_rule(condition_value, condition_bdim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
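// Editorial note: wrappers whose schema returns a TensorList, such as the single-argument
// `where` overload above, re-wrap through makeBatchedVector instead of makeBatched, applying
// the same value/bdim pairing to every tensor in the returned vector.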
  12823. template <typename batch_rule_t, batch_rule_t batch_rule>
  12824. at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) {
  12825. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12826. auto maybe_layer = maybeCurrentDynamicLayer();
  12827. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12828. int64_t cur_level = maybe_layer->layerId();
  12829. if (!isBatchedAtLevel(v, cur_level)) {
  12830. return at::_ops::norm_except_dim::call(v, pow, dim);
  12831. }
  12832. Tensor v_value;
  12833. optional<int64_t> v_bdim;
  12834. std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
  12835. auto results = batch_rule(v_value, v_bdim, pow, dim);
  12836. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12837. }
  12838. template <typename batch_rule_t, batch_rule_t batch_rule>
  12839. at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  12840. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12841. auto maybe_layer = maybeCurrentDynamicLayer();
  12842. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12843. int64_t cur_level = maybe_layer->layerId();
  12844. if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
  12845. return at::_ops::_weight_norm::call(v, g, dim);
  12846. }
  12847. Tensor v_value;
  12848. optional<int64_t> v_bdim;
  12849. std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
  12850. Tensor g_value;
  12851. optional<int64_t> g_bdim;
  12852. std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
  12853. auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
  12854. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12855. }
  12856. template <typename batch_rule_t, batch_rule_t batch_rule>
  12857. ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  12858. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12859. auto maybe_layer = maybeCurrentDynamicLayer();
  12860. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12861. int64_t cur_level = maybe_layer->layerId();
  12862. if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
  12863. return at::_ops::_weight_norm_interface::call(v, g, dim);
  12864. }
  12865. Tensor v_value;
  12866. optional<int64_t> v_bdim;
  12867. std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
  12868. Tensor g_value;
  12869. optional<int64_t> g_bdim;
  12870. std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
  12871. auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
  12872. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  12873. }
  12874. template <typename batch_rule_t, batch_rule_t batch_rule>
  12875. ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  12876. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12877. auto maybe_layer = maybeCurrentDynamicLayer();
  12878. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12879. int64_t cur_level = maybe_layer->layerId();
  12880. if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
  12881. return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
  12882. }
  12883. Tensor grad_w_value;
  12884. optional<int64_t> grad_w_bdim;
  12885. std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
  12886. Tensor saved_v_value;
  12887. optional<int64_t> saved_v_bdim;
  12888. std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
  12889. Tensor saved_g_value;
  12890. optional<int64_t> saved_g_bdim;
  12891. std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
  12892. Tensor saved_norms_value;
  12893. optional<int64_t> saved_norms_bdim;
  12894. std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
  12895. auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
  12896. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  12897. }
  12898. template <typename batch_rule_t, batch_rule_t batch_rule>
  12899. ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  12900. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12901. auto maybe_layer = maybeCurrentDynamicLayer();
  12902. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12903. int64_t cur_level = maybe_layer->layerId();
  12904. if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
  12905. return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
  12906. }
  12907. Tensor grad_w_value;
  12908. optional<int64_t> grad_w_bdim;
  12909. std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
  12910. Tensor saved_v_value;
  12911. optional<int64_t> saved_v_bdim;
  12912. std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
  12913. Tensor saved_g_value;
  12914. optional<int64_t> saved_g_bdim;
  12915. std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
  12916. Tensor saved_norms_value;
  12917. optional<int64_t> saved_norms_bdim;
  12918. std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
  12919. auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
  12920. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  12921. }
  12922. template <typename batch_rule_t, batch_rule_t batch_rule>
  12923. at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  12924. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12925. auto maybe_layer = maybeCurrentDynamicLayer();
  12926. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12927. int64_t cur_level = maybe_layer->layerId();
  12928. if (!isBatchedAtLevel(self, cur_level)) {
  12929. return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
  12930. }
  12931. Tensor self_value;
  12932. optional<int64_t> self_bdim;
  12933. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12934. auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
  12935. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12936. }
  12937. template <typename batch_rule_t, batch_rule_t batch_rule>
  12938. at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) {
  12939. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12940. auto maybe_layer = maybeCurrentDynamicLayer();
  12941. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12942. int64_t cur_level = maybe_layer->layerId();
  12943. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, cur_level)) {
  12944. return at::_ops::_standard_gamma_grad::call(self, output);
  12945. }
  12946. Tensor self_value;
  12947. optional<int64_t> self_bdim;
  12948. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12949. Tensor output_value;
  12950. optional<int64_t> output_bdim;
  12951. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  12952. auto results = batch_rule(self_value, self_bdim, output_value, output_bdim);
  12953. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12954. }
  12955. template <typename batch_rule_t, batch_rule_t batch_rule>
  12956. at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
  12957. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12958. auto maybe_layer = maybeCurrentDynamicLayer();
  12959. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12960. int64_t cur_level = maybe_layer->layerId();
  12961. if (!isBatchedAtLevel(self, cur_level)) {
  12962. return at::_ops::_standard_gamma::call(self, generator);
  12963. }
  12964. Tensor self_value;
  12965. optional<int64_t> self_bdim;
  12966. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  12967. auto results = batch_rule(self_value, self_bdim, generator);
  12968. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12969. }
  12970. template <typename batch_rule_t, batch_rule_t batch_rule>
  12971. at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
  12972. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12973. auto maybe_layer = maybeCurrentDynamicLayer();
  12974. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12975. int64_t cur_level = maybe_layer->layerId();
  12976. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) {
  12977. return at::_ops::_dirichlet_grad::call(x, alpha, total);
  12978. }
  12979. Tensor x_value;
  12980. optional<int64_t> x_bdim;
  12981. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  12982. Tensor alpha_value;
  12983. optional<int64_t> alpha_bdim;
  12984. std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha, cur_level);
  12985. Tensor total_value;
  12986. optional<int64_t> total_bdim;
  12987. std::tie(total_value, total_bdim) = unwrapTensorAtLevel(total, cur_level);
  12988. auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim);
  12989. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  12990. }
  12991. template <typename batch_rule_t, batch_rule_t batch_rule>
  12992. at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
  12993. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  12994. auto maybe_layer = maybeCurrentDynamicLayer();
  12995. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  12996. int64_t cur_level = maybe_layer->layerId();
  12997. if (!isBatchedAtLevel(self, cur_level)) {
  12998. return at::_ops::_sample_dirichlet::call(self, generator);
  12999. }
  13000. Tensor self_value;
  13001. optional<int64_t> self_bdim;
  13002. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13003. auto results = batch_rule(self_value, self_bdim, generator);
  13004. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13005. }
  13006. template <typename batch_rule_t, batch_rule_t batch_rule>
  13007. at::Tensor poisson_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
  13008. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13009. auto maybe_layer = maybeCurrentDynamicLayer();
  13010. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13011. int64_t cur_level = maybe_layer->layerId();
  13012. if (!isBatchedAtLevel(self, cur_level)) {
  13013. return at::_ops::poisson::call(self, generator);
  13014. }
  13015. Tensor self_value;
  13016. optional<int64_t> self_bdim;
  13017. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13018. auto results = batch_rule(self_value, self_bdim, generator);
  13019. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13020. }
  13021. template <typename batch_rule_t, batch_rule_t batch_rule>
  13022. at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
  13023. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13024. auto maybe_layer = maybeCurrentDynamicLayer();
  13025. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13026. int64_t cur_level = maybe_layer->layerId();
  13027. if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) {
  13028. return at::_ops::binomial::call(count, prob, generator);
  13029. }
  13030. Tensor count_value;
  13031. optional<int64_t> count_bdim;
  13032. std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
  13033. Tensor prob_value;
  13034. optional<int64_t> prob_bdim;
  13035. std::tie(prob_value, prob_bdim) = unwrapTensorAtLevel(prob, cur_level);
  13036. auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator);
  13037. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13038. }
  13039. template <typename batch_rule_t, batch_rule_t batch_rule>
  13040. at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
  13041. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13042. auto maybe_layer = maybeCurrentDynamicLayer();
  13043. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13044. int64_t cur_level = maybe_layer->layerId();
  13045. if (!isBatchedAtLevel(self, cur_level)) {
  13046. return at::_ops::native_norm::call(self, p);
  13047. }
  13048. Tensor self_value;
  13049. optional<int64_t> self_bdim;
  13050. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13051. auto results = batch_rule(self_value, self_bdim, p);
  13052. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13053. }
  13054. template <typename batch_rule_t, batch_rule_t batch_rule>
  13055. at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  13056. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13057. auto maybe_layer = maybeCurrentDynamicLayer();
  13058. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13059. int64_t cur_level = maybe_layer->layerId();
  13060. if (!isBatchedAtLevel(self, cur_level)) {
  13061. return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
  13062. }
  13063. Tensor self_value;
  13064. optional<int64_t> self_bdim;
  13065. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13066. auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
  13067. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13068. }
  13069. template <typename batch_rule_t, batch_rule_t batch_rule>
  13070. at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) {
  13071. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13072. auto maybe_layer = maybeCurrentDynamicLayer();
  13073. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13074. int64_t cur_level = maybe_layer->layerId();
  13075. if (!isBatchedAtLevel(self, cur_level)) {
  13076. return at::_ops::_sparse_sum::call(self);
  13077. }
  13078. Tensor self_value;
  13079. optional<int64_t> self_bdim;
  13080. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13081. auto results = batch_rule(self_value, self_bdim);
  13082. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13083. }
  13084. template <typename batch_rule_t, batch_rule_t batch_rule>
  13085. at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
  13086. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13087. auto maybe_layer = maybeCurrentDynamicLayer();
  13088. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13089. int64_t cur_level = maybe_layer->layerId();
  13090. if (!isBatchedAtLevel(self, cur_level)) {
  13091. return at::_ops::_sparse_sum_dtype::call(self, dtype);
  13092. }
  13093. Tensor self_value;
  13094. optional<int64_t> self_bdim;
  13095. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13096. auto results = batch_rule(self_value, self_bdim, dtype);
  13097. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13098. }
  13099. template <typename batch_rule_t, batch_rule_t batch_rule>
  13100. at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
  13101. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13102. auto maybe_layer = maybeCurrentDynamicLayer();
  13103. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13104. int64_t cur_level = maybe_layer->layerId();
  13105. if (!isBatchedAtLevel(self, cur_level)) {
  13106. return at::_ops::_sparse_sum_dim::call(self, dim);
  13107. }
  13108. Tensor self_value;
  13109. optional<int64_t> self_bdim;
  13110. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13111. auto results = batch_rule(self_value, self_bdim, dim);
  13112. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13113. }
  13114. template <typename batch_rule_t, batch_rule_t batch_rule>
  13115. at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
  13116. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13117. auto maybe_layer = maybeCurrentDynamicLayer();
  13118. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13119. int64_t cur_level = maybe_layer->layerId();
  13120. if (!isBatchedAtLevel(self, cur_level)) {
  13121. return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
  13122. }
  13123. Tensor self_value;
  13124. optional<int64_t> self_bdim;
  13125. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13126. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  13127. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13128. }
  13129. template <typename batch_rule_t, batch_rule_t batch_rule>
  13130. at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
  13131. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13132. auto maybe_layer = maybeCurrentDynamicLayer();
  13133. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13134. int64_t cur_level = maybe_layer->layerId();
  13135. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  13136. return at::_ops::_sparse_sum_backward::call(grad, self, dim);
  13137. }
  13138. Tensor grad_value;
  13139. optional<int64_t> grad_bdim;
  13140. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  13141. Tensor self_value;
  13142. optional<int64_t> self_bdim;
  13143. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13144. auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim);
  13145. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13146. }
  13147. template <typename batch_rule_t, batch_rule_t batch_rule>
  13148. at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  13149. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13150. auto maybe_layer = maybeCurrentDynamicLayer();
  13151. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13152. int64_t cur_level = maybe_layer->layerId();
  13153. if (!isBatchedAtLevel(self, cur_level)) {
  13154. return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
  13155. }
  13156. Tensor self_value;
  13157. optional<int64_t> self_bdim;
  13158. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13159. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  13160. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13161. }
  13162. template <typename batch_rule_t, batch_rule_t batch_rule>
  13163. at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  13164. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13165. auto maybe_layer = maybeCurrentDynamicLayer();
  13166. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13167. int64_t cur_level = maybe_layer->layerId();
  13168. if (!isBatchedAtLevel(self, cur_level)) {
  13169. return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
  13170. }
  13171. Tensor self_value;
  13172. optional<int64_t> self_bdim;
  13173. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13174. auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
  13175. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13176. }
  13177. template <typename batch_rule_t, batch_rule_t batch_rule>
  13178. at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  13179. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13180. auto maybe_layer = maybeCurrentDynamicLayer();
  13181. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13182. int64_t cur_level = maybe_layer->layerId();
  13183. if (!isBatchedAtLevel(self, cur_level)) {
  13184. return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
  13185. }
  13186. Tensor self_value;
  13187. optional<int64_t> self_bdim;
  13188. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13189. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  13190. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13191. }
  13192. template <typename batch_rule_t, batch_rule_t batch_rule>
  13193. at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  13194. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13195. auto maybe_layer = maybeCurrentDynamicLayer();
  13196. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13197. int64_t cur_level = maybe_layer->layerId();
  13198. if (!isBatchedAtLevel(self, cur_level)) {
  13199. return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
  13200. }
  13201. Tensor self_value;
  13202. optional<int64_t> self_bdim;
  13203. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13204. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  13205. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13206. }
  13207. template <typename batch_rule_t, batch_rule_t batch_rule>
  13208. at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
  13209. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13210. auto maybe_layer = maybeCurrentDynamicLayer();
  13211. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13212. int64_t cur_level = maybe_layer->layerId();
  13213. if (!isBatchedAtLevel(self, cur_level)) {
  13214. return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
  13215. }
  13216. Tensor self_value;
  13217. optional<int64_t> self_bdim;
  13218. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13219. auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
  13220. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13221. }
  13222. template <typename batch_rule_t, batch_rule_t batch_rule>
  13223. at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
  13224. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13225. auto maybe_layer = maybeCurrentDynamicLayer();
  13226. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13227. int64_t cur_level = maybe_layer->layerId();
  13228. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
  13229. return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
  13230. }
  13231. Tensor grad_output_value;
  13232. optional<int64_t> grad_output_bdim;
  13233. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  13234. Tensor output_value;
  13235. optional<int64_t> output_bdim;
  13236. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  13237. Tensor self_value;
  13238. optional<int64_t> self_bdim;
  13239. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13240. auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
  13241. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13242. }
  13243. template <typename batch_rule_t, batch_rule_t batch_rule>
  13244. at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  13245. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  13246. auto maybe_layer = maybeCurrentDynamicLayer();
  13247. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  13248. int64_t cur_level = maybe_layer->layerId();
  13249. if (!isBatchedAtLevel(self, cur_level)) {
  13250. return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
  13251. }
  13252. Tensor self_value;
  13253. optional<int64_t> self_bdim;
  13254. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  13255. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  13256. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  13257. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
    return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
  }
  Tensor diagonals_value;
  optional<int64_t> diagonals_bdim;
  std::tie(diagonals_value, diagonals_bdim) = unwrapTensorAtLevel(diagonals, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::norm_Scalar::call(self, p);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
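// NOTE [multi-output plumbing]
// Ops that return several tensors (e.g. frexp.Tensor below) get the same
// treatment, except that the batch rule yields one (value, bdim) pair per
// output and the wrapper re-wraps each pair with makeBatched before packing
// them into a std::tuple.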
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> frexp_Tensor_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::frexp_Tensor::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nuclear_norm::call(self, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clone_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clone::call(self, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor positive_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::positive::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
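// NOTE [in-place plumbing]
// In-place and resize-style wrappers (generated as "gen_vmap_inplace_plumbing")
// invoke the batch rule only for its side effects on the unwrapped values and
// return the original `self` reference instead of re-wrapping a new result
// with makeBatched.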
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & resize_as__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
    return at::_ops::resize_as_::call(self, the_template, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor the_template_value;
  optional<int64_t> the_template_bdim;
  std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
  batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
    return at::_ops::resize_as_sparse_::call(self, the_template);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor the_template_value;
  optional<int64_t> the_template_bdim;
  std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
  batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & zero__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::zero_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::sub_Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::sub__Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sub_Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sub__Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::subtract_Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::subtract__Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::subtract_Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::subtract__Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::rsub_Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::heaviside::call(self, values);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(self_value, self_bdim, values_value, values_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::heaviside_::call(self, values);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(self_value, self_bdim, values_value, values_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsub_Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat1_value;
  optional<int64_t> mat1_bdim;
  std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat1_value;
  optional<int64_t> mat1_bdim;
  std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) {
    return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor arg_out_value;
  optional<int64_t> arg_out_bdim;
  std::tie(arg_out_value, arg_out_bdim) = unwrapTensorAtLevel(arg_out, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat1_value;
  optional<int64_t> mat1_bdim;
  std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat1_value;
  optional<int64_t> mat1_bdim;
  std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat1_value;
  optional<int64_t> mat1_bdim;
  std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
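// The sparse factory overloads that follow (sparse_compressed/csr/csc/bsr/bsc
// and their _unsafe variants) all unwrap the index and value tensors in the
// same way and forward the remaining arguments (dtype, layout, device,
// pin_memory, and size where present) to the batch rule unchanged.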
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor compressed_indices_value;
  optional<int64_t> compressed_indices_bdim;
  std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
  Tensor plain_indices_value;
  optional<int64_t> plain_indices_bdim;
  std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
  }
  Tensor compressed_indices_value;
  optional<int64_t> compressed_indices_bdim;
  std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
  Tensor plain_indices_value;
  optional<int64_t> plain_indices_bdim;
  std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor compressed_indices_value;
  optional<int64_t> compressed_indices_bdim;
  std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
  Tensor plain_indices_value;
  optional<int64_t> plain_indices_bdim;
  std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory);
  }
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory);
  }
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size);
  }
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(indices_value, indices_bdim, values_value, values_bdim, size);
}
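// The _validate_* wrappers above and below return void, so the generator emits the
// "gen_vmap_plumbing_no_returns" variant: the unwrapped arguments are forwarded to
// batch_rule and no result is re-wrapped with makeBatched.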
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout);
  }
  Tensor compressed_indices_value;
  optional<int64_t> compressed_indices_bdim;
  std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
  Tensor plain_indices_value;
  optional<int64_t> plain_indices_bdim;
  std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
  }
  Tensor ccol_indices_value;
  optional<int64_t> ccol_indices_bdim;
  std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
  Tensor row_indices_value;
  optional<int64_t> row_indices_bdim;
  std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
  }
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return self;
}
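// In-place ops (trailing underscore in the op name) use the "gen_vmap_inplace_plumbing"
// variant: the batch rule mutates the unwrapped self_value in place and the wrapper
// returns the original `self` reference instead of constructing a new batched tensor.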
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::sparse_mask::call(self, mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
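// Illustrative sketch (not part of the generated file): a plumbing template such as
// sparse_mask_generated_plumbing is instantiated with a concrete batch rule and
// registered on the FuncTorchBatched dispatch key. The batch-rule name and the exact
// registration below are hypothetical; TORCH_LIBRARY_IMPL and m.impl are the real
// PyTorch registration APIs, but functorch's own macros may wrap them differently.
//
//   std::tuple<at::Tensor, c10::optional<int64_t>> sparse_mask_batch_rule(
//       const at::Tensor& self, c10::optional<int64_t> self_bdim,
//       const at::Tensor& mask, c10::optional<int64_t> mask_bdim);
//
//   TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
//     m.impl("sparse_mask",
//            sparse_mask_generated_plumbing<decltype(&sparse_mask_batch_rule),
//                                           &sparse_mask_batch_rule>);
//   }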
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _to_cpu_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::_to_cpu::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
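// Ops that take or return a TensorList (e.g. _to_cpu, unbind) pass the list through to
// the batch rule unmodified and re-wrap the resulting vector of tensors with
// makeBatchedVector rather than makeBatched.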
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_dense::call(self, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_to_dense::call(self, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
    return at::_ops::to_dense_backward::call(grad, input);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor coalesce_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::coalesce::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_coalesce::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_indices::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _values_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_values::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_coalesced_::call(self, coalesced);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, coalesced);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::indices::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor values_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::values::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::crow_indices::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor col_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::col_indices::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ccol_indices::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor row_indices_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::row_indices::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::hspmm::call(mat1, mat2);
  }
  Tensor mat1_value;
  optional<int64_t> mat1_bdim;
  std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unbind_int::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unbind_Dimname::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sparse_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_sparse_csr::call(self, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_sparse_csc::call(self, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::to_mkldnn::call(self, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
    return at::_ops::to_mkldnn_backward::call(grad, input);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, reduce_range);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor scale_value;
  optional<int64_t> scale_bdim;
  std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
  Tensor zero_point_value;
  optional<int64_t> zero_point_bdim;
  std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
  }
  Tensor scales_value;
  optional<int64_t> scales_bdim;
  std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
  Tensor zero_points_value;
  optional<int64_t> zero_points_bdim;
  std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor scales_value;
  optional<int64_t> scales_bdim;
  std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
  Tensor zero_points_value;
  optional<int64_t> zero_points_bdim;
  std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::dequantize_self::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> dequantize_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::dequantize_tensors::call(tensors);
  }
  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::q_per_channel_scales::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::q_per_channel_zero_points::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor int_repr_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::int_repr::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, scale, zero_point);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
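// For ops that return multiple Tensors, the batch rule yields an interleaved tuple of
// (tensor, optional batch-dim) pairs; consecutive std::get indices (<0>,<1>), (<2>,<3>), ...
// are therefore re-wrapped pairwise into the output tuple.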
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) {
return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
Tensor fake_quant_enabled_value;
optional<int64_t> fake_quant_enabled_bdim;
std::tie(fake_quant_enabled_value, fake_quant_enabled_bdim) = unwrapTensorAtLevel(fake_quant_enabled, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
}
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
Tensor mask_value;
optional<int64_t> mask_bdim;
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
}
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
Tensor mask_value;
optional<int64_t> mask_bdim;
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor scale_value;
optional<int64_t> scale_bdim;
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
Tensor zero_point_value;
optional<int64_t> zero_point_bdim;
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(weight, cur_level)) {
return at::_ops::_saturate_weight_to_fp16::call(weight);
}
Tensor weight_value;
optional<int64_t> weight_bdim;
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
auto results = batch_rule(weight_value, weight_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level)) {
return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_device_generated_plumbing(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor other_value;
optional<int64_t> other_bdim;
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> meshgrid_generated_plumbing(at::TensorList tensors) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(tensors, cur_level)) {
return at::_ops::meshgrid::call(tensors);
}
auto results = batch_rule(tensors);
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(tensors, cur_level)) {
return at::_ops::meshgrid_indexing::call(tensors, indexing);
}
auto results = batch_rule(tensors, indexing);
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(tensors, cur_level)) {
return at::_ops::cartesian_prod::call(tensors);
}
auto results = batch_rule(tensors);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::combinations::call(self, r, with_replacement);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, r, with_replacement);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
}
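// In the RNN wrappers below, TensorList arguments (e.g. hx, params) are forwarded to the
// batch rule as-is, and c10::optional<Tensor> arguments are unwrapped only when they hold
// a value; otherwise their value/batch-dim slots are left as empty optionals.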
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward_generated_plumbing(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
Tensor grad_y_value;
optional<int64_t> grad_y_bdim;
std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y, cur_level);
Tensor z_state_value;
optional<int64_t> z_state_bdim;
std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
Tensor cell_state_fwd_value;
optional<int64_t> cell_state_fwd_bdim;
std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor layersOutputs_value;
optional<int64_t> layersOutputs_bdim;
std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level);
optional<Tensor> grad_hy_value;
optional<int64_t> grad_hy_bdim;
if (grad_hy) {
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
}
optional<Tensor> grad_cy_value;
optional<int64_t> grad_cy_bdim;
if (grad_cy) {
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
}
auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
}
Tensor input_gates_value;
optional<int64_t> input_gates_bdim;
std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
Tensor hidden_gates_value;
optional<int64_t> hidden_gates_bdim;
std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
Tensor cx_value;
optional<int64_t> cx_bdim;
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
optional<Tensor> input_bias_value;
optional<int64_t> input_bias_bdim;
if (input_bias) {
std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
}
optional<Tensor> hidden_bias_value;
optional<int64_t> hidden_bias_bdim;
if (hidden_bias) {
std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
}
auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}
Tensor cx_value;
optional<int64_t> cx_bdim;
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
Tensor cy_value;
optional<int64_t> cy_bdim;
std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
Tensor workspace_value;
optional<int64_t> workspace_bdim;
std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
optional<Tensor> grad_hy_value;
optional<int64_t> grad_hy_bdim;
if (grad_hy) {
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
}
optional<Tensor> grad_cy_value;
optional<int64_t> grad_cy_bdim;
if (grad_cy) {
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
}
auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}
Tensor cx_value;
optional<int64_t> cx_bdim;
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
Tensor cy_value;
optional<int64_t> cy_bdim;
std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
Tensor workspace_value;
optional<int64_t> workspace_bdim;
std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
optional<Tensor> grad_hy_value;
optional<int64_t> grad_hy_bdim;
if (grad_hy) {
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
}
optional<Tensor> grad_cy_value;
optional<int64_t> grad_cy_bdim;
if (grad_cy) {
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
}
auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level)) {
return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}
Tensor input_gates_value;
optional<int64_t> input_gates_bdim;
std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
Tensor hidden_gates_value;
optional<int64_t> hidden_gates_bdim;
std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
Tensor cx_value;
optional<int64_t> cx_bdim;
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
Tensor cy_value;
optional<int64_t> cy_bdim;
std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
optional<Tensor> grad_hy_value;
optional<int64_t> grad_hy_bdim;
if (grad_hy) {
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
}
optional<Tensor> grad_cy_value;
optional<int64_t> grad_cy_bdim;
if (grad_cy) {
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
}
optional<Tensor> input_bias_value;
optional<int64_t> input_bias_bdim;
if (input_bias) {
std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
}
optional<Tensor> hidden_bias_value;
optional<int64_t> hidden_bias_bdim;
if (hidden_bias) {
std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
}
auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
}
Tensor input_gates_value;
optional<int64_t> input_gates_bdim;
std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
Tensor hidden_gates_value;
optional<int64_t> hidden_gates_bdim;
std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
optional<Tensor> input_bias_value;
optional<int64_t> input_bias_bdim;
if (input_bias) {
std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
}
optional<Tensor> hidden_bias_value;
optional<int64_t> hidden_bias_bdim;
if (hidden_bias) {
std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
}
auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
}
Tensor grad_hy_value;
optional<int64_t> grad_hy_bdim;
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
Tensor workspace_value;
optional<int64_t> workspace_bdim;
std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
  15770. template <typename batch_rule_t, batch_rule_t batch_rule>
  15771. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
  15772. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  15773. auto maybe_layer = maybeCurrentDynamicLayer();
  15774. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  15775. int64_t cur_level = maybe_layer->layerId();
  15776. if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
  15777. return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
  15778. }
  15779. Tensor grad_hy_value;
  15780. optional<int64_t> grad_hy_bdim;
  15781. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
  15782. Tensor input_gates_value;
  15783. optional<int64_t> input_gates_bdim;
  15784. std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
  15785. Tensor hidden_gates_value;
  15786. optional<int64_t> hidden_gates_bdim;
  15787. std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
  15788. Tensor hx_value;
  15789. optional<int64_t> hx_bdim;
  15790. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  15791. optional<Tensor> input_bias_value;
  15792. optional<int64_t> input_bias_bdim;
  15793. if (input_bias) {
  15794. std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
  15795. }
  15796. optional<Tensor> hidden_bias_value;
  15797. optional<int64_t> hidden_bias_bdim;
  15798. if (hidden_bias) {
  15799. std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
  15800. }
  15801. auto results = batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
  15802. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
  15803. }
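// NOTE: Each *_generated_plumbing wrapper in this file follows the same pattern:
//  1. exclude the FuncTorchBatched dispatch key so the redispatch below does not
//     re-enter this plumbing;
//  2. look up the innermost vmap layer and its level (cur_level);
//  3. if no argument is batched at that level, fall straight through to the regular
//     operator via at::_ops::<op>::call;
//  4. otherwise unwrap every batched Tensor into a (value, bdim) pair, invoke the
//     registered batch_rule with the interleaved value/bdim arguments, and re-wrap
//     each returned (value, bdim) pair with makeBatched at cur_level.
// TensorList arguments (e.g. hx, params below) are passed through to the batch_rule
// without unwrapping here.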
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
Tensor data_value;
optional<int64_t> data_bdim;
std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
Tensor batch_sizes_value;
optional<int64_t> batch_sizes_bdim;
std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> gru_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
Tensor data_value;
optional<int64_t> data_bdim;
std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
Tensor batch_sizes_value;
optional<int64_t> batch_sizes_bdim;
std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
Tensor data_value;
optional<int64_t> data_bdim;
std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
Tensor batch_sizes_value;
optional<int64_t> batch_sizes_bdim;
std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}
Tensor data_value;
optional<int64_t> data_bdim;
std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
Tensor batch_sizes_value;
optional<int64_t> batch_sizes_bdim;
std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
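// NOTE: For the cell-level ops below, optional bias tensors are unwrapped only when
// they are present; an absent c10::optional<at::Tensor> argument is forwarded to the
// batch_rule as an empty optional value alongside an empty bdim.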
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor w_ih_value;
optional<int64_t> w_ih_bdim;
std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
Tensor w_hh_value;
optional<int64_t> w_hh_bdim;
std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
optional<Tensor> b_ih_value;
optional<int64_t> b_ih_bdim;
if (b_ih) {
std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
}
optional<Tensor> b_hh_value;
optional<int64_t> b_hh_bdim;
if (b_hh) {
std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
}
auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
Tensor w_ih_value;
optional<int64_t> w_ih_bdim;
std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
Tensor w_hh_value;
optional<int64_t> w_hh_bdim;
std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
optional<Tensor> b_ih_value;
optional<int64_t> b_ih_bdim;
if (b_ih) {
std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
}
optional<Tensor> b_hh_value;
optional<int64_t> b_hh_bdim;
if (b_hh) {
std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
}
auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
Tensor w_ih_value;
optional<int64_t> w_ih_bdim;
std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
Tensor w_hh_value;
optional<int64_t> w_hh_bdim;
std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
optional<Tensor> b_ih_value;
optional<int64_t> b_ih_bdim;
if (b_ih) {
std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
}
optional<Tensor> b_hh_value;
optional<int64_t> b_hh_bdim;
if (b_hh) {
std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
}
auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor hx_value;
optional<int64_t> hx_bdim;
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
Tensor w_ih_value;
optional<int64_t> w_ih_bdim;
std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
Tensor w_hh_value;
optional<int64_t> w_hh_bdim;
std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
optional<Tensor> b_ih_value;
optional<int64_t> b_ih_bdim;
if (b_ih) {
std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
}
optional<Tensor> b_hh_value;
optional<int64_t> b_hh_bdim;
if (b_hh) {
std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
}
auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
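// NOTE: The quantized_* cell wrappers below treat the packed weights and column
// offsets as ordinary Tensor arguments (unwrapped like any other input), while the
// Scalar parameters (scale_ih, scale_hh, zero_point_ih, zero_point_hh) are forwarded
// to the batch_rule unchanged.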
  16087. template <typename batch_rule_t, batch_rule_t batch_rule>
  16088. ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  16089. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16090. auto maybe_layer = maybeCurrentDynamicLayer();
  16091. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16092. int64_t cur_level = maybe_layer->layerId();
  16093. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
  16094. return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16095. }
  16096. Tensor input_value;
  16097. optional<int64_t> input_bdim;
  16098. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  16099. Tensor w_ih_value;
  16100. optional<int64_t> w_ih_bdim;
  16101. std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  16102. Tensor w_hh_value;
  16103. optional<int64_t> w_hh_bdim;
  16104. std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  16105. Tensor b_ih_value;
  16106. optional<int64_t> b_ih_bdim;
  16107. std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  16108. Tensor b_hh_value;
  16109. optional<int64_t> b_hh_bdim;
  16110. std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  16111. Tensor packed_ih_value;
  16112. optional<int64_t> packed_ih_bdim;
  16113. std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  16114. Tensor packed_hh_value;
  16115. optional<int64_t> packed_hh_bdim;
  16116. std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  16117. Tensor col_offsets_ih_value;
  16118. optional<int64_t> col_offsets_ih_bdim;
  16119. std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  16120. Tensor col_offsets_hh_value;
  16121. optional<int64_t> col_offsets_hh_bdim;
  16122. std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  16123. auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16124. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  16125. }
  16126. template <typename batch_rule_t, batch_rule_t batch_rule>
  16127. at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  16128. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16129. auto maybe_layer = maybeCurrentDynamicLayer();
  16130. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16131. int64_t cur_level = maybe_layer->layerId();
  16132. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
  16133. return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16134. }
  16135. Tensor input_value;
  16136. optional<int64_t> input_bdim;
  16137. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  16138. Tensor hx_value;
  16139. optional<int64_t> hx_bdim;
  16140. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  16141. Tensor w_ih_value;
  16142. optional<int64_t> w_ih_bdim;
  16143. std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  16144. Tensor w_hh_value;
  16145. optional<int64_t> w_hh_bdim;
  16146. std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  16147. Tensor b_ih_value;
  16148. optional<int64_t> b_ih_bdim;
  16149. std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  16150. Tensor b_hh_value;
  16151. optional<int64_t> b_hh_bdim;
  16152. std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  16153. Tensor packed_ih_value;
  16154. optional<int64_t> packed_ih_bdim;
  16155. std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  16156. Tensor packed_hh_value;
  16157. optional<int64_t> packed_hh_bdim;
  16158. std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  16159. Tensor col_offsets_ih_value;
  16160. optional<int64_t> col_offsets_ih_bdim;
  16161. std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  16162. Tensor col_offsets_hh_value;
  16163. optional<int64_t> col_offsets_hh_bdim;
  16164. std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  16165. auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16166. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16167. }
  16168. template <typename batch_rule_t, batch_rule_t batch_rule>
  16169. at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  16170. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16171. auto maybe_layer = maybeCurrentDynamicLayer();
  16172. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16173. int64_t cur_level = maybe_layer->layerId();
  16174. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
  16175. return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16176. }
  16177. Tensor input_value;
  16178. optional<int64_t> input_bdim;
  16179. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  16180. Tensor hx_value;
  16181. optional<int64_t> hx_bdim;
  16182. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  16183. Tensor w_ih_value;
  16184. optional<int64_t> w_ih_bdim;
  16185. std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  16186. Tensor w_hh_value;
  16187. optional<int64_t> w_hh_bdim;
  16188. std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  16189. Tensor b_ih_value;
  16190. optional<int64_t> b_ih_bdim;
  16191. std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  16192. Tensor b_hh_value;
  16193. optional<int64_t> b_hh_bdim;
  16194. std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  16195. Tensor packed_ih_value;
  16196. optional<int64_t> packed_ih_bdim;
  16197. std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  16198. Tensor packed_hh_value;
  16199. optional<int64_t> packed_hh_bdim;
  16200. std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  16201. Tensor col_offsets_ih_value;
  16202. optional<int64_t> col_offsets_ih_bdim;
  16203. std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  16204. Tensor col_offsets_hh_value;
  16205. optional<int64_t> col_offsets_hh_bdim;
  16206. std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  16207. auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16208. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16209. }
  16210. template <typename batch_rule_t, batch_rule_t batch_rule>
  16211. at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  16212. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16213. auto maybe_layer = maybeCurrentDynamicLayer();
  16214. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16215. int64_t cur_level = maybe_layer->layerId();
  16216. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
  16217. return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16218. }
  16219. Tensor input_value;
  16220. optional<int64_t> input_bdim;
  16221. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  16222. Tensor hx_value;
  16223. optional<int64_t> hx_bdim;
  16224. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  16225. Tensor w_ih_value;
  16226. optional<int64_t> w_ih_bdim;
  16227. std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  16228. Tensor w_hh_value;
  16229. optional<int64_t> w_hh_bdim;
  16230. std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  16231. Tensor b_ih_value;
  16232. optional<int64_t> b_ih_bdim;
  16233. std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  16234. Tensor b_hh_value;
  16235. optional<int64_t> b_hh_bdim;
  16236. std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  16237. Tensor packed_ih_value;
  16238. optional<int64_t> packed_ih_bdim;
  16239. std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  16240. Tensor packed_hh_value;
  16241. optional<int64_t> packed_hh_bdim;
  16242. std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  16243. Tensor col_offsets_ih_value;
  16244. optional<int64_t> col_offsets_ih_bdim;
  16245. std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  16246. Tensor col_offsets_hh_value;
  16247. optional<int64_t> col_offsets_hh_bdim;
  16248. std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  16249. auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  16250. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16251. }
  16252. template <typename batch_rule_t, batch_rule_t batch_rule>
  16253. ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
  16254. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16255. auto maybe_layer = maybeCurrentDynamicLayer();
  16256. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16257. int64_t cur_level = maybe_layer->layerId();
  16258. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
  16259. return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
  16260. }
  16261. Tensor input_value;
  16262. optional<int64_t> input_bdim;
  16263. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  16264. Tensor lengths_value;
  16265. optional<int64_t> lengths_bdim;
  16266. std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths, cur_level);
  16267. auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first);
  16268. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  16269. }
  16270. template <typename batch_rule_t, batch_rule_t batch_rule>
  16271. at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
  16272. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16273. auto maybe_layer = maybeCurrentDynamicLayer();
  16274. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16275. int64_t cur_level = maybe_layer->layerId();
  16276. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
  16277. return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
  16278. }
  16279. Tensor grad_value;
  16280. optional<int64_t> grad_bdim;
  16281. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  16282. Tensor batch_sizes_value;
  16283. optional<int64_t> batch_sizes_bdim;
  16284. std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  16285. auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first);
  16286. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16287. }
  16288. template <typename batch_rule_t, batch_rule_t batch_rule>
  16289. ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
  16290. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16291. auto maybe_layer = maybeCurrentDynamicLayer();
  16292. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16293. int64_t cur_level = maybe_layer->layerId();
  16294. if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
  16295. return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
  16296. }
  16297. Tensor data_value;
  16298. optional<int64_t> data_bdim;
  16299. std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  16300. Tensor batch_sizes_value;
  16301. optional<int64_t> batch_sizes_bdim;
  16302. std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  16303. auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length);
  16304. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  16305. }
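// NOTE: The set_ wrappers below come from the in-place plumbing
// ("gen_vmap_inplace_plumbing"): the batch_rule is invoked for its side effect on the
// unwrapped self_value, and the original `self` is returned, so no makeBatched
// re-wrapping takes place.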
  16306. template <typename batch_rule_t, batch_rule_t batch_rule>
  16307. at::Tensor & set__source_Storage_generated_plumbing(at::Tensor & self, at::Storage source) {
  16308. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16309. auto maybe_layer = maybeCurrentDynamicLayer();
  16310. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16311. int64_t cur_level = maybe_layer->layerId();
  16312. if (!isBatchedAtLevel(self, cur_level)) {
  16313. return at::_ops::set__source_Storage::call(self, source);
  16314. }
  16315. Tensor self_value;
  16316. optional<int64_t> self_bdim;
  16317. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16318. batch_rule(self_value, self_bdim, source);
  16319. return self;
  16320. }
  16321. template <typename batch_rule_t, batch_rule_t batch_rule>
  16322. at::Tensor & set__source_Storage_storage_offset_generated_plumbing(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  16323. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16324. auto maybe_layer = maybeCurrentDynamicLayer();
  16325. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16326. int64_t cur_level = maybe_layer->layerId();
  16327. if (!isBatchedAtLevel(self, cur_level)) {
  16328. return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
  16329. }
  16330. Tensor self_value;
  16331. optional<int64_t> self_bdim;
  16332. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16333. batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
  16334. return self;
  16335. }
  16336. template <typename batch_rule_t, batch_rule_t batch_rule>
  16337. at::Tensor & set__source_Tensor_storage_offset_generated_plumbing(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  16338. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16339. auto maybe_layer = maybeCurrentDynamicLayer();
  16340. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16341. int64_t cur_level = maybe_layer->layerId();
  16342. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  16343. return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, size, stride);
  16344. }
  16345. Tensor self_value;
  16346. optional<int64_t> self_bdim;
  16347. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16348. Tensor source_value;
  16349. optional<int64_t> source_bdim;
  16350. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  16351. batch_rule(self_value, self_bdim, source_value, source_bdim, storage_offset, size, stride);
  16352. return self;
  16353. }
  16354. template <typename batch_rule_t, batch_rule_t batch_rule>
  16355. at::Tensor & set__source_Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & source) {
  16356. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16357. auto maybe_layer = maybeCurrentDynamicLayer();
  16358. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16359. int64_t cur_level = maybe_layer->layerId();
  16360. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  16361. return at::_ops::set__source_Tensor::call(self, source);
  16362. }
  16363. Tensor self_value;
  16364. optional<int64_t> self_bdim;
  16365. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16366. Tensor source_value;
  16367. optional<int64_t> source_bdim;
  16368. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  16369. batch_rule(self_value, self_bdim, source_value, source_bdim);
  16370. return self;
  16371. }
  16372. template <typename batch_rule_t, batch_rule_t batch_rule>
  16373. at::Tensor & set__generated_plumbing(at::Tensor & self) {
  16374. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16375. auto maybe_layer = maybeCurrentDynamicLayer();
  16376. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16377. int64_t cur_level = maybe_layer->layerId();
  16378. if (!isBatchedAtLevel(self, cur_level)) {
  16379. return at::_ops::set_::call(self);
  16380. }
  16381. Tensor self_value;
  16382. optional<int64_t> self_bdim;
  16383. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16384. batch_rule(self_value, self_bdim);
  16385. return self;
  16386. }
  16387. template <typename batch_rule_t, batch_rule_t batch_rule>
  16388. at::Tensor lift_generated_plumbing(const at::Tensor & self) {
  16389. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16390. auto maybe_layer = maybeCurrentDynamicLayer();
  16391. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16392. int64_t cur_level = maybe_layer->layerId();
  16393. if (!isBatchedAtLevel(self, cur_level)) {
  16394. return at::_ops::lift::call(self);
  16395. }
  16396. Tensor self_value;
  16397. optional<int64_t> self_bdim;
  16398. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16399. auto results = batch_rule(self_value, self_bdim);
  16400. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16401. }
  16402. template <typename batch_rule_t, batch_rule_t batch_rule>
  16403. at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) {
  16404. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16405. auto maybe_layer = maybeCurrentDynamicLayer();
  16406. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16407. int64_t cur_level = maybe_layer->layerId();
  16408. if (!isBatchedAtLevel(self, cur_level)) {
  16409. return at::_ops::lift_fresh::call(self);
  16410. }
  16411. Tensor self_value;
  16412. optional<int64_t> self_bdim;
  16413. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16414. auto results = batch_rule(self_value, self_bdim);
  16415. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16416. }
  16417. template <typename batch_rule_t, batch_rule_t batch_rule>
  16418. at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) {
  16419. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16420. auto maybe_layer = maybeCurrentDynamicLayer();
  16421. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16422. int64_t cur_level = maybe_layer->layerId();
  16423. if (!isBatchedAtLevel(self, cur_level)) {
  16424. return at::_ops::lift_fresh_copy::call(self);
  16425. }
  16426. Tensor self_value;
  16427. optional<int64_t> self_bdim;
  16428. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16429. auto results = batch_rule(self_value, self_bdim);
  16430. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16431. }
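// NOTE: masked_fill and masked_scatter appear in pairs: the in-place variant returns
// `self` after running the batch_rule for its side effect, while the functional
// variant wraps the batch_rule's (value, bdim) result with makeBatched before
// returning it.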
  16432. template <typename batch_rule_t, batch_rule_t batch_rule>
  16433. at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  16434. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16435. auto maybe_layer = maybeCurrentDynamicLayer();
  16436. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16437. int64_t cur_level = maybe_layer->layerId();
  16438. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  16439. return at::_ops::masked_fill__Scalar::call(self, mask, value);
  16440. }
  16441. Tensor self_value;
  16442. optional<int64_t> self_bdim;
  16443. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16444. Tensor mask_value;
  16445. optional<int64_t> mask_bdim;
  16446. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  16447. batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
  16448. return self;
  16449. }
  16450. template <typename batch_rule_t, batch_rule_t batch_rule>
  16451. at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  16452. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16453. auto maybe_layer = maybeCurrentDynamicLayer();
  16454. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16455. int64_t cur_level = maybe_layer->layerId();
  16456. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  16457. return at::_ops::masked_fill_Scalar::call(self, mask, value);
  16458. }
  16459. Tensor self_value;
  16460. optional<int64_t> self_bdim;
  16461. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16462. Tensor mask_value;
  16463. optional<int64_t> mask_bdim;
  16464. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  16465. auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
  16466. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16467. }
  16468. template <typename batch_rule_t, batch_rule_t batch_rule>
  16469. at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  16470. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16471. auto maybe_layer = maybeCurrentDynamicLayer();
  16472. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16473. int64_t cur_level = maybe_layer->layerId();
  16474. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
  16475. return at::_ops::masked_fill__Tensor::call(self, mask, value);
  16476. }
  16477. Tensor self_value;
  16478. optional<int64_t> self_bdim;
  16479. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16480. Tensor mask_value;
  16481. optional<int64_t> mask_bdim;
  16482. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  16483. Tensor value_value;
  16484. optional<int64_t> value_bdim;
  16485. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  16486. batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
  16487. return self;
  16488. }
  16489. template <typename batch_rule_t, batch_rule_t batch_rule>
  16490. at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  16491. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16492. auto maybe_layer = maybeCurrentDynamicLayer();
  16493. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16494. int64_t cur_level = maybe_layer->layerId();
  16495. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
  16496. return at::_ops::masked_fill_Tensor::call(self, mask, value);
  16497. }
  16498. Tensor self_value;
  16499. optional<int64_t> self_bdim;
  16500. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16501. Tensor mask_value;
  16502. optional<int64_t> mask_bdim;
  16503. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  16504. Tensor value_value;
  16505. optional<int64_t> value_bdim;
  16506. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  16507. auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
  16508. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16509. }
  16510. template <typename batch_rule_t, batch_rule_t batch_rule>
  16511. at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  16512. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16513. auto maybe_layer = maybeCurrentDynamicLayer();
  16514. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  16515. int64_t cur_level = maybe_layer->layerId();
  16516. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  16517. return at::_ops::masked_scatter_::call(self, mask, source);
  16518. }
  16519. Tensor self_value;
  16520. optional<int64_t> self_bdim;
  16521. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16522. Tensor mask_value;
  16523. optional<int64_t> mask_bdim;
  16524. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  16525. Tensor source_value;
  16526. optional<int64_t> source_bdim;
  16527. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  16528. batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
  16529. return self;
  16530. }
  16531. template <typename batch_rule_t, batch_rule_t batch_rule>
  16532. at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  16533. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  16534. auto maybe_layer = maybeCurrentDynamicLayer();
  16535. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  16536. int64_t cur_level = maybe_layer->layerId();
  16537. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  16538. return at::_ops::masked_scatter::call(self, mask, source);
  16539. }
  16540. Tensor self_value;
  16541. optional<int64_t> self_bdim;
  16542. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  16543. Tensor mask_value;
  16544. optional<int64_t> mask_bdim;
  16545. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  16546. Tensor source_value;
  16547. optional<int64_t> source_bdim;
  16548. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  16549. auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
  16550. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  16551. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view::call(self, size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_dtype::call(self, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::put_::call(self, index, source, accumulate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
  return self;
}
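// In-place plumbing (checked with "gen_vmap_inplace_plumbing") differs only in the
// tail: the batch rule is called for its side effect on self_value, and the original
// `self` reference is returned instead of wrapping a fresh result with makeBatched.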
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::put::call(self, index, source, accumulate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_add_::call(self, dim, index, source, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_add::call(self, dim, index, source, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill__int_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill__int_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_src::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter__src::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_value::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter__value::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter__reduce::call(self, dim, index, src, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_dimname_src::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_dimname_value::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
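// Illustrative sketch only (not part of the generated plumbing): a batch rule that
// plugs into scatter_add_generated_plumbing above would pair every Tensor argument
// with its optional batch dimension and return its result the same way, roughly
//
//   std::tuple<at::Tensor, c10::optional<int64_t>> scatter_add_batch_rule(
//       const at::Tensor & self, c10::optional<int64_t> self_bdim,
//       int64_t dim,
//       const at::Tensor & index, c10::optional<int64_t> index_bdim,
//       const at::Tensor & src, c10::optional<int64_t> src_bdim);
//
// so that std::get<0>/std::get<1> of the returned tuple feed makeBatched.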
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add_::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add_dimname::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::eq__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::eq__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_and_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
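// For the Scalar_Tensor overloads (e.g. bitwise_and_Scalar_Tensor above), `self` is a
// Scalar and is forwarded to the batch rule unchanged; only `other` is unwrapped, and
// only its batch status decides whether the fallthrough path is taken.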
  17287. template <typename batch_rule_t, batch_rule_t batch_rule>
  17288. at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  17289. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17290. auto maybe_layer = maybeCurrentDynamicLayer();
  17291. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17292. int64_t cur_level = maybe_layer->layerId();
  17293. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17294. return at::_ops::bitwise_and_Tensor::call(self, other);
  17295. }
  17296. Tensor self_value;
  17297. optional<int64_t> self_bdim;
  17298. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17299. Tensor other_value;
  17300. optional<int64_t> other_bdim;
  17301. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17302. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  17303. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17304. }
  17305. template <typename batch_rule_t, batch_rule_t batch_rule>
  17306. at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  17307. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17308. auto maybe_layer = maybeCurrentDynamicLayer();
  17309. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  17310. int64_t cur_level = maybe_layer->layerId();
  17311. if (!isBatchedAtLevel(self, cur_level)) {
  17312. return at::_ops::bitwise_and__Scalar::call(self, other);
  17313. }
  17314. Tensor self_value;
  17315. optional<int64_t> self_bdim;
  17316. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17317. batch_rule(self_value, self_bdim, other);
  17318. return self;
  17319. }
  17320. template <typename batch_rule_t, batch_rule_t batch_rule>
  17321. at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  17322. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17323. auto maybe_layer = maybeCurrentDynamicLayer();
  17324. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  17325. int64_t cur_level = maybe_layer->layerId();
  17326. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17327. return at::_ops::bitwise_and__Tensor::call(self, other);
  17328. }
  17329. Tensor self_value;
  17330. optional<int64_t> self_bdim;
  17331. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17332. Tensor other_value;
  17333. optional<int64_t> other_bdim;
  17334. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17335. batch_rule(self_value, self_bdim, other_value, other_bdim);
  17336. return self;
  17337. }
  17338. template <typename batch_rule_t, batch_rule_t batch_rule>
  17339. at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  17340. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17341. auto maybe_layer = maybeCurrentDynamicLayer();
  17342. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17343. int64_t cur_level = maybe_layer->layerId();
  17344. if (!isBatchedAtLevel(self, cur_level)) {
  17345. return at::_ops::__and___Scalar::call(self, other);
  17346. }
  17347. Tensor self_value;
  17348. optional<int64_t> self_bdim;
  17349. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17350. auto results = batch_rule(self_value, self_bdim, other);
  17351. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17352. }
  17353. template <typename batch_rule_t, batch_rule_t batch_rule>
  17354. at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  17355. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17356. auto maybe_layer = maybeCurrentDynamicLayer();
  17357. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17358. int64_t cur_level = maybe_layer->layerId();
  17359. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17360. return at::_ops::__and___Tensor::call(self, other);
  17361. }
  17362. Tensor self_value;
  17363. optional<int64_t> self_bdim;
  17364. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17365. Tensor other_value;
  17366. optional<int64_t> other_bdim;
  17367. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17368. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  17369. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17370. }
  17371. template <typename batch_rule_t, batch_rule_t batch_rule>
  17372. at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  17373. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17374. auto maybe_layer = maybeCurrentDynamicLayer();
  17375. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  17376. int64_t cur_level = maybe_layer->layerId();
  17377. if (!isBatchedAtLevel(self, cur_level)) {
  17378. return at::_ops::__iand___Scalar::call(self, other);
  17379. }
  17380. Tensor self_value;
  17381. optional<int64_t> self_bdim;
  17382. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17383. batch_rule(self_value, self_bdim, other);
  17384. return self;
  17385. }
  17386. template <typename batch_rule_t, batch_rule_t batch_rule>
  17387. at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  17388. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17389. auto maybe_layer = maybeCurrentDynamicLayer();
  17390. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  17391. int64_t cur_level = maybe_layer->layerId();
  17392. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17393. return at::_ops::__iand___Tensor::call(self, other);
  17394. }
  17395. Tensor self_value;
  17396. optional<int64_t> self_bdim;
  17397. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17398. Tensor other_value;
  17399. optional<int64_t> other_bdim;
  17400. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17401. batch_rule(self_value, self_bdim, other_value, other_bdim);
  17402. return self;
  17403. }
  17404. template <typename batch_rule_t, batch_rule_t batch_rule>
  17405. at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  17406. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17407. auto maybe_layer = maybeCurrentDynamicLayer();
  17408. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17409. int64_t cur_level = maybe_layer->layerId();
  17410. if (!isBatchedAtLevel(self, cur_level)) {
  17411. return at::_ops::bitwise_or_Scalar::call(self, other);
  17412. }
  17413. Tensor self_value;
  17414. optional<int64_t> self_bdim;
  17415. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17416. auto results = batch_rule(self_value, self_bdim, other);
  17417. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17418. }
  17419. template <typename batch_rule_t, batch_rule_t batch_rule>
  17420. at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  17421. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17422. auto maybe_layer = maybeCurrentDynamicLayer();
  17423. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17424. int64_t cur_level = maybe_layer->layerId();
  17425. if (!isBatchedAtLevel(other, cur_level)) {
  17426. return at::_ops::bitwise_or_Scalar_Tensor::call(self, other);
  17427. }
  17428. Tensor other_value;
  17429. optional<int64_t> other_bdim;
  17430. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17431. auto results = batch_rule(self, other_value, other_bdim);
  17432. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17433. }
  17434. template <typename batch_rule_t, batch_rule_t batch_rule>
  17435. at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  17436. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17437. auto maybe_layer = maybeCurrentDynamicLayer();
  17438. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17439. int64_t cur_level = maybe_layer->layerId();
  17440. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17441. return at::_ops::bitwise_or_Tensor::call(self, other);
  17442. }
  17443. Tensor self_value;
  17444. optional<int64_t> self_bdim;
  17445. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17446. Tensor other_value;
  17447. optional<int64_t> other_bdim;
  17448. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17449. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  17450. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17451. }
  17452. template <typename batch_rule_t, batch_rule_t batch_rule>
  17453. at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  17454. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17455. auto maybe_layer = maybeCurrentDynamicLayer();
  17456. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  17457. int64_t cur_level = maybe_layer->layerId();
  17458. if (!isBatchedAtLevel(self, cur_level)) {
  17459. return at::_ops::bitwise_or__Scalar::call(self, other);
  17460. }
  17461. Tensor self_value;
  17462. optional<int64_t> self_bdim;
  17463. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17464. batch_rule(self_value, self_bdim, other);
  17465. return self;
  17466. }
  17467. template <typename batch_rule_t, batch_rule_t batch_rule>
  17468. at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  17469. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17470. auto maybe_layer = maybeCurrentDynamicLayer();
  17471. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  17472. int64_t cur_level = maybe_layer->layerId();
  17473. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17474. return at::_ops::bitwise_or__Tensor::call(self, other);
  17475. }
  17476. Tensor self_value;
  17477. optional<int64_t> self_bdim;
  17478. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17479. Tensor other_value;
  17480. optional<int64_t> other_bdim;
  17481. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17482. batch_rule(self_value, self_bdim, other_value, other_bdim);
  17483. return self;
  17484. }
  17485. template <typename batch_rule_t, batch_rule_t batch_rule>
  17486. at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  17487. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17488. auto maybe_layer = maybeCurrentDynamicLayer();
  17489. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17490. int64_t cur_level = maybe_layer->layerId();
  17491. if (!isBatchedAtLevel(self, cur_level)) {
  17492. return at::_ops::__or___Scalar::call(self, other);
  17493. }
  17494. Tensor self_value;
  17495. optional<int64_t> self_bdim;
  17496. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17497. auto results = batch_rule(self_value, self_bdim, other);
  17498. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17499. }
  17500. template <typename batch_rule_t, batch_rule_t batch_rule>
  17501. at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  17502. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  17503. auto maybe_layer = maybeCurrentDynamicLayer();
  17504. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  17505. int64_t cur_level = maybe_layer->layerId();
  17506. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  17507. return at::_ops::__or___Tensor::call(self, other);
  17508. }
  17509. Tensor self_value;
  17510. optional<int64_t> self_bdim;
  17511. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  17512. Tensor other_value;
  17513. optional<int64_t> other_bdim;
  17514. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  17515. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  17516. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  17517. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ior___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ior___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_xor_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
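// Scalar-Tensor overloads (scalar on the left) only unwrap `other`; the Scalar argument
// is forwarded to the batch rule unchanged.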
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_xor__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__xor___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__xor___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ixor___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ixor___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__lshift___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__lshift___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ilshift___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ilshift___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_left_shift_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_left_shift__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __rshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__rshift___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__rshift___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__irshift___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__irshift___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_right_shift_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_right_shift__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
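// End of the bitwise/shift block. The plumbing below covers in-place triangular
// helpers (tril_/triu_), other in-place element-wise ops, and the RNG fill ops
// (random_, uniform_, cauchy_, log_normal_, exponential_, geometric_).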
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tril_::call(self, diagonal);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, diagonal);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::triu_::call(self, diagonal);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, diagonal);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & digamma__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::digamma_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
    return at::_ops::lerp__Scalar::call(self, end, weight);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor end_value;
  optional<int64_t> end_bdim;
  std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
  batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::lerp__Tensor::call(self, end, weight);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor end_value;
  optional<int64_t> end_bdim;
  std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor batch1_value;
  optional<int64_t> batch1_bdim;
  std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
  Tensor batch2_value;
  optional<int64_t> batch2_bdim;
  std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
  batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
    return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor batch1_value;
  optional<int64_t> batch1_bdim;
  std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
  Tensor batch2_value;
  optional<int64_t> batch2_bdim;
  std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
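// Non-Tensor arguments (int64_t, double, c10::optional values, Generators, Scalars)
// are passed through to the batch rule untouched; only Tensor arguments are unwrapped
// at the current level.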
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random__from::call(self, from, to, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, from, to, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random__to::call(self, to, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, to, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & random__generated_plumbing(at::Tensor & self, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random_::call(self, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::uniform_::call(self, from, to, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, from, to, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cauchy_::call(self, median, sigma, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, median, sigma, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_normal_::call(self, mean, std, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, mean, std, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exponential_::call(self, lambd, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, lambd, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::geometric_::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::diag::call(self, diagonal);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, diagonal);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::cross::call(self, other, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::triu::call(self, diagonal);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, diagonal);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::tril::call(self, diagonal);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, diagonal);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trace_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::trace::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::trace_backward::call(grad, sizes);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, sizes);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
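// The comparison ops below (ne/eq/ge and aliases such as not_equal/greater_equal)
// reuse the same unary/binary plumbing; the generated code only handles
// batch-dimension bookkeeping and is agnostic to the semantics of the underlying op.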
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ne_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ne_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ne__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ne__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::not_equal_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::not_equal_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::not_equal__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::not_equal__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::eq_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::eq_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ge_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ge_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::ge__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ge__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::greater_equal_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::greater_equal_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18600. int64_t cur_level = maybe_layer->layerId();
  18601. if (!isBatchedAtLevel(self, cur_level)) {
  18602. return at::_ops::greater_equal__Scalar::call(self, other);
  18603. }
  18604. Tensor self_value;
  18605. optional<int64_t> self_bdim;
  18606. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18607. batch_rule(self_value, self_bdim, other);
  18608. return self;
  18609. }
  18610. template <typename batch_rule_t, batch_rule_t batch_rule>
  18611. at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  18612. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18613. auto maybe_layer = maybeCurrentDynamicLayer();
  18614. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18615. int64_t cur_level = maybe_layer->layerId();
  18616. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18617. return at::_ops::greater_equal__Tensor::call(self, other);
  18618. }
  18619. Tensor self_value;
  18620. optional<int64_t> self_bdim;
  18621. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18622. Tensor other_value;
  18623. optional<int64_t> other_bdim;
  18624. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18625. batch_rule(self_value, self_bdim, other_value, other_bdim);
  18626. return self;
  18627. }
  18628. template <typename batch_rule_t, batch_rule_t batch_rule>
  18629. at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  18630. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18631. auto maybe_layer = maybeCurrentDynamicLayer();
  18632. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18633. int64_t cur_level = maybe_layer->layerId();
  18634. if (!isBatchedAtLevel(self, cur_level)) {
  18635. return at::_ops::le_Scalar::call(self, other);
  18636. }
  18637. Tensor self_value;
  18638. optional<int64_t> self_bdim;
  18639. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18640. auto results = batch_rule(self_value, self_bdim, other);
  18641. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18642. }
  18643. template <typename batch_rule_t, batch_rule_t batch_rule>
  18644. at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  18645. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18646. auto maybe_layer = maybeCurrentDynamicLayer();
  18647. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18648. int64_t cur_level = maybe_layer->layerId();
  18649. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18650. return at::_ops::le_Tensor::call(self, other);
  18651. }
  18652. Tensor self_value;
  18653. optional<int64_t> self_bdim;
  18654. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18655. Tensor other_value;
  18656. optional<int64_t> other_bdim;
  18657. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18658. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  18659. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18660. }
  18661. template <typename batch_rule_t, batch_rule_t batch_rule>
  18662. at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  18663. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18664. auto maybe_layer = maybeCurrentDynamicLayer();
  18665. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18666. int64_t cur_level = maybe_layer->layerId();
  18667. if (!isBatchedAtLevel(self, cur_level)) {
  18668. return at::_ops::le__Scalar::call(self, other);
  18669. }
  18670. Tensor self_value;
  18671. optional<int64_t> self_bdim;
  18672. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18673. batch_rule(self_value, self_bdim, other);
  18674. return self;
  18675. }
  18676. template <typename batch_rule_t, batch_rule_t batch_rule>
  18677. at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  18678. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18679. auto maybe_layer = maybeCurrentDynamicLayer();
  18680. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18681. int64_t cur_level = maybe_layer->layerId();
  18682. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18683. return at::_ops::le__Tensor::call(self, other);
  18684. }
  18685. Tensor self_value;
  18686. optional<int64_t> self_bdim;
  18687. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18688. Tensor other_value;
  18689. optional<int64_t> other_bdim;
  18690. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18691. batch_rule(self_value, self_bdim, other_value, other_bdim);
  18692. return self;
  18693. }
  18694. template <typename batch_rule_t, batch_rule_t batch_rule>
  18695. at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  18696. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18697. auto maybe_layer = maybeCurrentDynamicLayer();
  18698. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18699. int64_t cur_level = maybe_layer->layerId();
  18700. if (!isBatchedAtLevel(self, cur_level)) {
  18701. return at::_ops::less_equal_Scalar::call(self, other);
  18702. }
  18703. Tensor self_value;
  18704. optional<int64_t> self_bdim;
  18705. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18706. auto results = batch_rule(self_value, self_bdim, other);
  18707. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18708. }
  18709. template <typename batch_rule_t, batch_rule_t batch_rule>
  18710. at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  18711. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18712. auto maybe_layer = maybeCurrentDynamicLayer();
  18713. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18714. int64_t cur_level = maybe_layer->layerId();
  18715. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18716. return at::_ops::less_equal_Tensor::call(self, other);
  18717. }
  18718. Tensor self_value;
  18719. optional<int64_t> self_bdim;
  18720. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18721. Tensor other_value;
  18722. optional<int64_t> other_bdim;
  18723. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18724. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  18725. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18726. }
  18727. template <typename batch_rule_t, batch_rule_t batch_rule>
  18728. at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  18729. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18730. auto maybe_layer = maybeCurrentDynamicLayer();
  18731. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18732. int64_t cur_level = maybe_layer->layerId();
  18733. if (!isBatchedAtLevel(self, cur_level)) {
  18734. return at::_ops::less_equal__Scalar::call(self, other);
  18735. }
  18736. Tensor self_value;
  18737. optional<int64_t> self_bdim;
  18738. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18739. batch_rule(self_value, self_bdim, other);
  18740. return self;
  18741. }
  18742. template <typename batch_rule_t, batch_rule_t batch_rule>
  18743. at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  18744. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18745. auto maybe_layer = maybeCurrentDynamicLayer();
  18746. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18747. int64_t cur_level = maybe_layer->layerId();
  18748. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18749. return at::_ops::less_equal__Tensor::call(self, other);
  18750. }
  18751. Tensor self_value;
  18752. optional<int64_t> self_bdim;
  18753. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18754. Tensor other_value;
  18755. optional<int64_t> other_bdim;
  18756. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18757. batch_rule(self_value, self_bdim, other_value, other_bdim);
  18758. return self;
  18759. }
  18760. template <typename batch_rule_t, batch_rule_t batch_rule>
  18761. at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  18762. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18763. auto maybe_layer = maybeCurrentDynamicLayer();
  18764. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18765. int64_t cur_level = maybe_layer->layerId();
  18766. if (!isBatchedAtLevel(self, cur_level)) {
  18767. return at::_ops::gt_Scalar::call(self, other);
  18768. }
  18769. Tensor self_value;
  18770. optional<int64_t> self_bdim;
  18771. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18772. auto results = batch_rule(self_value, self_bdim, other);
  18773. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18774. }
  18775. template <typename batch_rule_t, batch_rule_t batch_rule>
  18776. at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  18777. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18778. auto maybe_layer = maybeCurrentDynamicLayer();
  18779. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18780. int64_t cur_level = maybe_layer->layerId();
  18781. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18782. return at::_ops::gt_Tensor::call(self, other);
  18783. }
  18784. Tensor self_value;
  18785. optional<int64_t> self_bdim;
  18786. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18787. Tensor other_value;
  18788. optional<int64_t> other_bdim;
  18789. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18790. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  18791. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18792. }
  18793. template <typename batch_rule_t, batch_rule_t batch_rule>
  18794. at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  18795. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18796. auto maybe_layer = maybeCurrentDynamicLayer();
  18797. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18798. int64_t cur_level = maybe_layer->layerId();
  18799. if (!isBatchedAtLevel(self, cur_level)) {
  18800. return at::_ops::gt__Scalar::call(self, other);
  18801. }
  18802. Tensor self_value;
  18803. optional<int64_t> self_bdim;
  18804. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18805. batch_rule(self_value, self_bdim, other);
  18806. return self;
  18807. }
  18808. template <typename batch_rule_t, batch_rule_t batch_rule>
  18809. at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  18810. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18811. auto maybe_layer = maybeCurrentDynamicLayer();
  18812. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18813. int64_t cur_level = maybe_layer->layerId();
  18814. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18815. return at::_ops::gt__Tensor::call(self, other);
  18816. }
  18817. Tensor self_value;
  18818. optional<int64_t> self_bdim;
  18819. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18820. Tensor other_value;
  18821. optional<int64_t> other_bdim;
  18822. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18823. batch_rule(self_value, self_bdim, other_value, other_bdim);
  18824. return self;
  18825. }
  18826. template <typename batch_rule_t, batch_rule_t batch_rule>
  18827. at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  18828. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18829. auto maybe_layer = maybeCurrentDynamicLayer();
  18830. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18831. int64_t cur_level = maybe_layer->layerId();
  18832. if (!isBatchedAtLevel(self, cur_level)) {
  18833. return at::_ops::greater_Scalar::call(self, other);
  18834. }
  18835. Tensor self_value;
  18836. optional<int64_t> self_bdim;
  18837. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18838. auto results = batch_rule(self_value, self_bdim, other);
  18839. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18840. }
  18841. template <typename batch_rule_t, batch_rule_t batch_rule>
  18842. at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  18843. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18844. auto maybe_layer = maybeCurrentDynamicLayer();
  18845. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18846. int64_t cur_level = maybe_layer->layerId();
  18847. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18848. return at::_ops::greater_Tensor::call(self, other);
  18849. }
  18850. Tensor self_value;
  18851. optional<int64_t> self_bdim;
  18852. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18853. Tensor other_value;
  18854. optional<int64_t> other_bdim;
  18855. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18856. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  18857. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18858. }
  18859. template <typename batch_rule_t, batch_rule_t batch_rule>
  18860. at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  18861. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18862. auto maybe_layer = maybeCurrentDynamicLayer();
  18863. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18864. int64_t cur_level = maybe_layer->layerId();
  18865. if (!isBatchedAtLevel(self, cur_level)) {
  18866. return at::_ops::greater__Scalar::call(self, other);
  18867. }
  18868. Tensor self_value;
  18869. optional<int64_t> self_bdim;
  18870. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18871. batch_rule(self_value, self_bdim, other);
  18872. return self;
  18873. }
  18874. template <typename batch_rule_t, batch_rule_t batch_rule>
  18875. at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  18876. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18877. auto maybe_layer = maybeCurrentDynamicLayer();
  18878. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18879. int64_t cur_level = maybe_layer->layerId();
  18880. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18881. return at::_ops::greater__Tensor::call(self, other);
  18882. }
  18883. Tensor self_value;
  18884. optional<int64_t> self_bdim;
  18885. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18886. Tensor other_value;
  18887. optional<int64_t> other_bdim;
  18888. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18889. batch_rule(self_value, self_bdim, other_value, other_bdim);
  18890. return self;
  18891. }
  18892. template <typename batch_rule_t, batch_rule_t batch_rule>
  18893. at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  18894. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18895. auto maybe_layer = maybeCurrentDynamicLayer();
  18896. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18897. int64_t cur_level = maybe_layer->layerId();
  18898. if (!isBatchedAtLevel(self, cur_level)) {
  18899. return at::_ops::lt_Scalar::call(self, other);
  18900. }
  18901. Tensor self_value;
  18902. optional<int64_t> self_bdim;
  18903. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18904. auto results = batch_rule(self_value, self_bdim, other);
  18905. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18906. }
  18907. template <typename batch_rule_t, batch_rule_t batch_rule>
  18908. at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  18909. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18910. auto maybe_layer = maybeCurrentDynamicLayer();
  18911. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18912. int64_t cur_level = maybe_layer->layerId();
  18913. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18914. return at::_ops::lt_Tensor::call(self, other);
  18915. }
  18916. Tensor self_value;
  18917. optional<int64_t> self_bdim;
  18918. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18919. Tensor other_value;
  18920. optional<int64_t> other_bdim;
  18921. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18922. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  18923. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18924. }
  18925. template <typename batch_rule_t, batch_rule_t batch_rule>
  18926. at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  18927. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18928. auto maybe_layer = maybeCurrentDynamicLayer();
  18929. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18930. int64_t cur_level = maybe_layer->layerId();
  18931. if (!isBatchedAtLevel(self, cur_level)) {
  18932. return at::_ops::lt__Scalar::call(self, other);
  18933. }
  18934. Tensor self_value;
  18935. optional<int64_t> self_bdim;
  18936. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18937. batch_rule(self_value, self_bdim, other);
  18938. return self;
  18939. }
  18940. template <typename batch_rule_t, batch_rule_t batch_rule>
  18941. at::Tensor & lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  18942. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18943. auto maybe_layer = maybeCurrentDynamicLayer();
  18944. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18945. int64_t cur_level = maybe_layer->layerId();
  18946. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18947. return at::_ops::lt__Tensor::call(self, other);
  18948. }
  18949. Tensor self_value;
  18950. optional<int64_t> self_bdim;
  18951. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18952. Tensor other_value;
  18953. optional<int64_t> other_bdim;
  18954. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18955. batch_rule(self_value, self_bdim, other_value, other_bdim);
  18956. return self;
  18957. }
  18958. template <typename batch_rule_t, batch_rule_t batch_rule>
  18959. at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  18960. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18961. auto maybe_layer = maybeCurrentDynamicLayer();
  18962. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18963. int64_t cur_level = maybe_layer->layerId();
  18964. if (!isBatchedAtLevel(self, cur_level)) {
  18965. return at::_ops::less_Scalar::call(self, other);
  18966. }
  18967. Tensor self_value;
  18968. optional<int64_t> self_bdim;
  18969. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18970. auto results = batch_rule(self_value, self_bdim, other);
  18971. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18972. }
  18973. template <typename batch_rule_t, batch_rule_t batch_rule>
  18974. at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  18975. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18976. auto maybe_layer = maybeCurrentDynamicLayer();
  18977. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  18978. int64_t cur_level = maybe_layer->layerId();
  18979. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  18980. return at::_ops::less_Tensor::call(self, other);
  18981. }
  18982. Tensor self_value;
  18983. optional<int64_t> self_bdim;
  18984. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  18985. Tensor other_value;
  18986. optional<int64_t> other_bdim;
  18987. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  18988. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  18989. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  18990. }
  18991. template <typename batch_rule_t, batch_rule_t batch_rule>
  18992. at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  18993. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  18994. auto maybe_layer = maybeCurrentDynamicLayer();
  18995. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  18996. int64_t cur_level = maybe_layer->layerId();
  18997. if (!isBatchedAtLevel(self, cur_level)) {
  18998. return at::_ops::less__Scalar::call(self, other);
  18999. }
  19000. Tensor self_value;
  19001. optional<int64_t> self_bdim;
  19002. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19003. batch_rule(self_value, self_bdim, other);
  19004. return self;
  19005. }
  19006. template <typename batch_rule_t, batch_rule_t batch_rule>
  19007. at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  19008. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19009. auto maybe_layer = maybeCurrentDynamicLayer();
  19010. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19011. int64_t cur_level = maybe_layer->layerId();
  19012. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  19013. return at::_ops::less__Tensor::call(self, other);
  19014. }
  19015. Tensor self_value;
  19016. optional<int64_t> self_bdim;
  19017. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19018. Tensor other_value;
  19019. optional<int64_t> other_bdim;
  19020. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  19021. batch_rule(self_value, self_bdim, other_value, other_bdim);
  19022. return self;
  19023. }
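// Selection/indexing wrappers below: non-Tensor arguments (dims, dimnames, sizes, flags
// such as sparse_grad) are passed straight through to the batch_rule; only Tensor
// arguments are unwrapped into (value, bdim) pairs.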
  19024. template <typename batch_rule_t, batch_rule_t batch_rule>
  19025. at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) {
  19026. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19027. auto maybe_layer = maybeCurrentDynamicLayer();
  19028. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19029. int64_t cur_level = maybe_layer->layerId();
  19030. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19031. return at::_ops::take::call(self, index);
  19032. }
  19033. Tensor self_value;
  19034. optional<int64_t> self_bdim;
  19035. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19036. Tensor index_value;
  19037. optional<int64_t> index_bdim;
  19038. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19039. auto results = batch_rule(self_value, self_bdim, index_value, index_bdim);
  19040. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19041. }
  19042. template <typename batch_rule_t, batch_rule_t batch_rule>
  19043. at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
  19044. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19045. auto maybe_layer = maybeCurrentDynamicLayer();
  19046. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19047. int64_t cur_level = maybe_layer->layerId();
  19048. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
  19049. return at::_ops::take_along_dim::call(self, indices, dim);
  19050. }
  19051. Tensor self_value;
  19052. optional<int64_t> self_bdim;
  19053. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19054. Tensor indices_value;
  19055. optional<int64_t> indices_bdim;
  19056. std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  19057. auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim);
  19058. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19059. }
  19060. template <typename batch_rule_t, batch_rule_t batch_rule>
  19061. at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
  19062. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19063. auto maybe_layer = maybeCurrentDynamicLayer();
  19064. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19065. int64_t cur_level = maybe_layer->layerId();
  19066. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19067. return at::_ops::index_select::call(self, dim, index);
  19068. }
  19069. Tensor self_value;
  19070. optional<int64_t> self_bdim;
  19071. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19072. Tensor index_value;
  19073. optional<int64_t> index_bdim;
  19074. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19075. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
  19076. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19077. }
  19078. template <typename batch_rule_t, batch_rule_t batch_rule>
  19079. at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
  19080. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19081. auto maybe_layer = maybeCurrentDynamicLayer();
  19082. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19083. int64_t cur_level = maybe_layer->layerId();
  19084. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19085. return at::_ops::index_select_dimname::call(self, dim, index);
  19086. }
  19087. Tensor self_value;
  19088. optional<int64_t> self_bdim;
  19089. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19090. Tensor index_value;
  19091. optional<int64_t> index_bdim;
  19092. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19093. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
  19094. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19095. }
  19096. template <typename batch_rule_t, batch_rule_t batch_rule>
  19097. at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
  19098. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19099. auto maybe_layer = maybeCurrentDynamicLayer();
  19100. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19101. int64_t cur_level = maybe_layer->layerId();
  19102. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19103. return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
  19104. }
  19105. Tensor grad_value;
  19106. optional<int64_t> grad_bdim;
  19107. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  19108. Tensor index_value;
  19109. optional<int64_t> index_bdim;
  19110. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19111. auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim);
  19112. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19113. }
  19114. template <typename batch_rule_t, batch_rule_t batch_rule>
  19115. at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
  19116. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19117. auto maybe_layer = maybeCurrentDynamicLayer();
  19118. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19119. int64_t cur_level = maybe_layer->layerId();
  19120. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  19121. return at::_ops::masked_select::call(self, mask);
  19122. }
  19123. Tensor self_value;
  19124. optional<int64_t> self_bdim;
  19125. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19126. Tensor mask_value;
  19127. optional<int64_t> mask_bdim;
  19128. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  19129. auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
  19130. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19131. }
  19132. template <typename batch_rule_t, batch_rule_t batch_rule>
  19133. at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
  19134. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19135. auto maybe_layer = maybeCurrentDynamicLayer();
  19136. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19137. int64_t cur_level = maybe_layer->layerId();
  19138. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  19139. return at::_ops::masked_select_backward::call(grad, input, mask);
  19140. }
  19141. Tensor grad_value;
  19142. optional<int64_t> grad_bdim;
  19143. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  19144. Tensor input_value;
  19145. optional<int64_t> input_bdim;
  19146. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  19147. Tensor mask_value;
  19148. optional<int64_t> mask_bdim;
  19149. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  19150. auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim);
  19151. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19152. }
  19153. template <typename batch_rule_t, batch_rule_t batch_rule>
  19154. at::Tensor nonzero_generated_plumbing(const at::Tensor & self) {
  19155. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19156. auto maybe_layer = maybeCurrentDynamicLayer();
  19157. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19158. int64_t cur_level = maybe_layer->layerId();
  19159. if (!isBatchedAtLevel(self, cur_level)) {
  19160. return at::_ops::nonzero::call(self);
  19161. }
  19162. Tensor self_value;
  19163. optional<int64_t> self_bdim;
  19164. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19165. auto results = batch_rule(self_value, self_bdim);
  19166. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19167. }
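// Ops returning ::std::vector<at::Tensor> use makeBatchedVector rather than makeBatched
// to re-wrap the batch_rule's result at the current level.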
  19168. template <typename batch_rule_t, batch_rule_t batch_rule>
  19169. ::std::vector<at::Tensor> nonzero_numpy_generated_plumbing(const at::Tensor & self) {
  19170. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19171. auto maybe_layer = maybeCurrentDynamicLayer();
  19172. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19173. int64_t cur_level = maybe_layer->layerId();
  19174. if (!isBatchedAtLevel(self, cur_level)) {
  19175. return at::_ops::nonzero_numpy::call(self);
  19176. }
  19177. Tensor self_value;
  19178. optional<int64_t> self_bdim;
  19179. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19180. auto results = batch_rule(self_value, self_bdim);
  19181. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  19182. }
  19183. template <typename batch_rule_t, batch_rule_t batch_rule>
  19184. at::Tensor argwhere_generated_plumbing(const at::Tensor & self) {
  19185. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19186. auto maybe_layer = maybeCurrentDynamicLayer();
  19187. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19188. int64_t cur_level = maybe_layer->layerId();
  19189. if (!isBatchedAtLevel(self, cur_level)) {
  19190. return at::_ops::argwhere::call(self);
  19191. }
  19192. Tensor self_value;
  19193. optional<int64_t> self_bdim;
  19194. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19195. auto results = batch_rule(self_value, self_bdim);
  19196. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19197. }
  19198. template <typename batch_rule_t, batch_rule_t batch_rule>
  19199. at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
  19200. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19201. auto maybe_layer = maybeCurrentDynamicLayer();
  19202. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19203. int64_t cur_level = maybe_layer->layerId();
  19204. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19205. return at::_ops::gather::call(self, dim, index, sparse_grad);
  19206. }
  19207. Tensor self_value;
  19208. optional<int64_t> self_bdim;
  19209. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19210. Tensor index_value;
  19211. optional<int64_t> index_bdim;
  19212. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19213. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
  19214. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19215. }
  19216. template <typename batch_rule_t, batch_rule_t batch_rule>
  19217. at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
  19218. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19219. auto maybe_layer = maybeCurrentDynamicLayer();
  19220. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19221. int64_t cur_level = maybe_layer->layerId();
  19222. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19223. return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
  19224. }
  19225. Tensor grad_value;
  19226. optional<int64_t> grad_bdim;
  19227. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  19228. Tensor self_value;
  19229. optional<int64_t> self_bdim;
  19230. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19231. Tensor index_value;
  19232. optional<int64_t> index_bdim;
  19233. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19234. auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
  19235. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19236. }
  19237. template <typename batch_rule_t, batch_rule_t batch_rule>
  19238. at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
  19239. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19240. auto maybe_layer = maybeCurrentDynamicLayer();
  19241. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19242. int64_t cur_level = maybe_layer->layerId();
  19243. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
  19244. return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
  19245. }
  19246. Tensor self_value;
  19247. optional<int64_t> self_bdim;
  19248. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19249. Tensor index_value;
  19250. optional<int64_t> index_bdim;
  19251. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19252. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
  19253. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19254. }
  19255. template <typename batch_rule_t, batch_rule_t batch_rule>
  19256. at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
  19257. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19258. auto maybe_layer = maybeCurrentDynamicLayer();
  19259. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19260. int64_t cur_level = maybe_layer->layerId();
  19261. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
  19262. return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
  19263. }
  19264. Tensor self_value;
  19265. optional<int64_t> self_bdim;
  19266. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19267. Tensor index_value;
  19268. optional<int64_t> index_bdim;
  19269. std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  19270. Tensor grad_value;
  19271. optional<int64_t> grad_bdim;
  19272. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  19273. auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim);
  19274. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19275. }
  19276. template <typename batch_rule_t, batch_rule_t batch_rule>
  19277. at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
  19278. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19279. auto maybe_layer = maybeCurrentDynamicLayer();
  19280. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19281. int64_t cur_level = maybe_layer->layerId();
  19282. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
  19283. return at::_ops::addcmul::call(self, tensor1, tensor2, value);
  19284. }
  19285. Tensor self_value;
  19286. optional<int64_t> self_bdim;
  19287. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19288. Tensor tensor1_value;
  19289. optional<int64_t> tensor1_bdim;
  19290. std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
  19291. Tensor tensor2_value;
  19292. optional<int64_t> tensor2_bdim;
  19293. std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
  19294. auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
  19295. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19296. }
  19297. template <typename batch_rule_t, batch_rule_t batch_rule>
  19298. at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
  19299. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19300. auto maybe_layer = maybeCurrentDynamicLayer();
  19301. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19302. int64_t cur_level = maybe_layer->layerId();
  19303. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
  19304. return at::_ops::addcmul_::call(self, tensor1, tensor2, value);
  19305. }
  19306. Tensor self_value;
  19307. optional<int64_t> self_bdim;
  19308. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19309. Tensor tensor1_value;
  19310. optional<int64_t> tensor1_bdim;
  19311. std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
  19312. Tensor tensor2_value;
  19313. optional<int64_t> tensor2_bdim;
  19314. std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
  19315. batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
  19316. return self;
  19317. }
  19318. template <typename batch_rule_t, batch_rule_t batch_rule>
  19319. at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
  19320. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19321. auto maybe_layer = maybeCurrentDynamicLayer();
  19322. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19323. int64_t cur_level = maybe_layer->layerId();
  19324. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
  19325. return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
  19326. }
  19327. Tensor self_value;
  19328. optional<int64_t> self_bdim;
  19329. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19330. Tensor tensor1_value;
  19331. optional<int64_t> tensor1_bdim;
  19332. std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
  19333. Tensor tensor2_value;
  19334. optional<int64_t> tensor2_bdim;
  19335. std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
  19336. auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
  19337. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19338. }
  19339. template <typename batch_rule_t, batch_rule_t batch_rule>
  19340. at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
  19341. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19342. auto maybe_layer = maybeCurrentDynamicLayer();
  19343. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19344. int64_t cur_level = maybe_layer->layerId();
  19345. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
  19346. return at::_ops::addcdiv_::call(self, tensor1, tensor2, value);
  19347. }
  19348. Tensor self_value;
  19349. optional<int64_t> self_bdim;
  19350. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19351. Tensor tensor1_value;
  19352. optional<int64_t> tensor1_bdim;
  19353. std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
  19354. Tensor tensor2_value;
  19355. optional<int64_t> tensor2_bdim;
  19356. std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
  19357. batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
  19358. return self;
  19359. }
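// Wrappers taking c10::optional<at::Tensor> (e.g. `weight` below) unwrap the tensor only
// when it is present; otherwise the empty (value, bdim) pair is handed to the batch_rule
// as-is.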
  19360. template <typename batch_rule_t, batch_rule_t batch_rule>
  19361. at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
  19362. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19363. auto maybe_layer = maybeCurrentDynamicLayer();
  19364. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19365. int64_t cur_level = maybe_layer->layerId();
  19366. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  19367. return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
  19368. }
  19369. Tensor self_value;
  19370. optional<int64_t> self_bdim;
  19371. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19372. Tensor target_value;
  19373. optional<int64_t> target_bdim;
  19374. std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  19375. optional<Tensor> weight_value;
  19376. optional<int64_t> weight_bdim;
  19377. if (weight) {
  19378. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  19379. }
  19380. auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing);
  19381. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19382. }
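// Multi-output ops receive a results tuple with the (value, bdim) pairs interleaved:
// std::get<0>/<1> for the first output, std::get<2>/<3> for the second, and so on.
// Each pair is re-wrapped with makeBatched and packed back into the returned tuple.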
  19383. template <typename batch_rule_t, batch_rule_t batch_rule>
  19384. ::std::tuple<at::Tensor,at::Tensor> triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
  19385. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19386. auto maybe_layer = maybeCurrentDynamicLayer();
  19387. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19388. int64_t cur_level = maybe_layer->layerId();
  19389. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
  19390. return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
  19391. }
  19392. Tensor self_value;
  19393. optional<int64_t> self_bdim;
  19394. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19395. Tensor A_value;
  19396. optional<int64_t> A_bdim;
  19397. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  19398. auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular);
  19399. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  19400. }
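// Void-returning ops use the "gen_vmap_plumbing_no_returns" variant: the batch_rule is
// invoked only for its side effect (here, error checking) and nothing is re-wrapped or
// returned.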
  19401. template <typename batch_rule_t, batch_rule_t batch_rule>
  19402. void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
  19403. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19404. auto maybe_layer = maybeCurrentDynamicLayer();
  19405. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  19406. int64_t cur_level = maybe_layer->layerId();
  19407. if (!isBatchedAtLevel(info, cur_level)) {
  19408. return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
  19409. }
  19410. Tensor info_value;
  19411. optional<int64_t> info_bdim;
  19412. std::tie(info_value, info_bdim) = unwrapTensorAtLevel(info, cur_level);
  19413. batch_rule(info_value, info_bdim, api_name, is_matrix);
  19414. }
  19415. template <typename batch_rule_t, batch_rule_t batch_rule>
  19416. at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
  19417. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19418. auto maybe_layer = maybeCurrentDynamicLayer();
  19419. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19420. int64_t cur_level = maybe_layer->layerId();
  19421. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) {
  19422. return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
  19423. }
  19424. Tensor self_value;
  19425. optional<int64_t> self_bdim;
  19426. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19427. Tensor B_value;
  19428. optional<int64_t> B_bdim;
  19429. std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
  19430. auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular);
  19431. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19432. }
  19433. template <typename batch_rule_t, batch_rule_t batch_rule>
  19434. at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N) {
  19435. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19436. auto maybe_layer = maybeCurrentDynamicLayer();
  19437. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19438. int64_t cur_level = maybe_layer->layerId();
  19439. if (!isBatchedAtLevel(x, cur_level)) {
  19440. return at::_ops::linalg_vander::call(x, N);
  19441. }
  19442. Tensor x_value;
  19443. optional<int64_t> x_bdim;
  19444. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  19445. auto results = batch_rule(x_value, x_bdim, N);
  19446. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19447. }
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::svd::call(self, some, compute_uv);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, some, compute_uv);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::swapaxes::call(self, axis0, axis1);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, axis0, axis1);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::swapdims::call(self, dim0, dim1);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cholesky::call(self, upper);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, upper);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
    return at::_ops::cholesky_solve::call(self, input2, upper);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor input2_value;
  optional<int64_t> input2_bdim;
  std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  19526. template <typename batch_rule_t, batch_rule_t batch_rule>
  19527. at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) {
  19528. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19529. auto maybe_layer = maybeCurrentDynamicLayer();
  19530. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19531. int64_t cur_level = maybe_layer->layerId();
  19532. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
  19533. return at::_ops::_cholesky_solve_helper::call(self, A, upper);
  19534. }
  19535. Tensor self_value;
  19536. optional<int64_t> self_bdim;
  19537. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19538. Tensor A_value;
  19539. optional<int64_t> A_bdim;
  19540. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  19541. auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper);
  19542. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19543. }
  19544. template <typename batch_rule_t, batch_rule_t batch_rule>
  19545. at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) {
  19546. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19547. auto maybe_layer = maybeCurrentDynamicLayer();
  19548. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19549. int64_t cur_level = maybe_layer->layerId();
  19550. if (!isBatchedAtLevel(self, cur_level)) {
  19551. return at::_ops::cholesky_inverse::call(self, upper);
  19552. }
  19553. Tensor self_value;
  19554. optional<int64_t> self_bdim;
  19555. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19556. auto results = batch_rule(self_value, self_bdim, upper);
  19557. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19558. }
  19559. template <typename batch_rule_t, batch_rule_t batch_rule>
  19560. ::std::tuple<at::Tensor,at::Tensor> qr_generated_plumbing(const at::Tensor & self, bool some) {
  19561. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19562. auto maybe_layer = maybeCurrentDynamicLayer();
  19563. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19564. int64_t cur_level = maybe_layer->layerId();
  19565. if (!isBatchedAtLevel(self, cur_level)) {
  19566. return at::_ops::qr::call(self, some);
  19567. }
  19568. Tensor self_value;
  19569. optional<int64_t> self_bdim;
  19570. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19571. auto results = batch_rule(self_value, self_bdim, some);
  19572. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  19573. }
  19574. template <typename batch_rule_t, batch_rule_t batch_rule>
  19575. ::std::tuple<at::Tensor,at::Tensor> geqrf_generated_plumbing(const at::Tensor & self) {
  19576. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19577. auto maybe_layer = maybeCurrentDynamicLayer();
  19578. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19579. int64_t cur_level = maybe_layer->layerId();
  19580. if (!isBatchedAtLevel(self, cur_level)) {
  19581. return at::_ops::geqrf::call(self);
  19582. }
  19583. Tensor self_value;
  19584. optional<int64_t> self_bdim;
  19585. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19586. auto results = batch_rule(self_value, self_bdim);
  19587. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  19588. }
  19589. template <typename batch_rule_t, batch_rule_t batch_rule>
  19590. at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) {
  19591. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19592. auto maybe_layer = maybeCurrentDynamicLayer();
  19593. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19594. int64_t cur_level = maybe_layer->layerId();
  19595. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
  19596. return at::_ops::orgqr::call(self, input2);
  19597. }
  19598. Tensor self_value;
  19599. optional<int64_t> self_bdim;
  19600. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19601. Tensor input2_value;
  19602. optional<int64_t> input2_bdim;
  19603. std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  19604. auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim);
  19605. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19606. }
  19607. template <typename batch_rule_t, batch_rule_t batch_rule>
  19608. at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
  19609. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19610. auto maybe_layer = maybeCurrentDynamicLayer();
  19611. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19612. int64_t cur_level = maybe_layer->layerId();
  19613. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) {
  19614. return at::_ops::ormqr::call(self, input2, input3, left, transpose);
  19615. }
  19616. Tensor self_value;
  19617. optional<int64_t> self_bdim;
  19618. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19619. Tensor input2_value;
  19620. optional<int64_t> input2_bdim;
  19621. std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  19622. Tensor input3_value;
  19623. optional<int64_t> input3_bdim;
  19624. std::tie(input3_value, input3_bdim) = unwrapTensorAtLevel(input3, cur_level);
  19625. auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose);
  19626. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19627. }
  19628. template <typename batch_rule_t, batch_rule_t batch_rule>
  19629. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) {
  19630. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19631. auto maybe_layer = maybeCurrentDynamicLayer();
  19632. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19633. int64_t cur_level = maybe_layer->layerId();
  19634. if (!isBatchedAtLevel(self, cur_level)) {
  19635. return at::_ops::_lu_with_info::call(self, pivot, check_errors);
  19636. }
  19637. Tensor self_value;
  19638. optional<int64_t> self_bdim;
  19639. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19640. auto results = batch_rule(self_value, self_bdim, pivot, check_errors);
  19641. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  19642. }
  19643. template <typename batch_rule_t, batch_rule_t batch_rule>
  19644. at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
  19645. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19646. auto maybe_layer = maybeCurrentDynamicLayer();
  19647. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19648. int64_t cur_level = maybe_layer->layerId();
  19649. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
  19650. return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
  19651. }
  19652. Tensor self_value;
  19653. optional<int64_t> self_bdim;
  19654. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19655. Tensor LU_data_value;
  19656. optional<int64_t> LU_data_bdim;
  19657. std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
  19658. Tensor LU_pivots_value;
  19659. optional<int64_t> LU_pivots_bdim;
  19660. std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
  19661. auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim);
  19662. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19663. }
  19664. template <typename batch_rule_t, batch_rule_t batch_rule>
  19665. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
  19666. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19667. auto maybe_layer = maybeCurrentDynamicLayer();
  19668. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19669. int64_t cur_level = maybe_layer->layerId();
  19670. if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
  19671. return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
  19672. }
  19673. Tensor LU_data_value;
  19674. optional<int64_t> LU_data_bdim;
  19675. std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
  19676. Tensor LU_pivots_value;
  19677. optional<int64_t> LU_pivots_bdim;
  19678. std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
  19679. auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots);
  19680. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  19681. }
  19682. template <typename batch_rule_t, batch_rule_t batch_rule>
  19683. at::Tensor multinomial_generated_plumbing(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
  19684. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19685. auto maybe_layer = maybeCurrentDynamicLayer();
  19686. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19687. int64_t cur_level = maybe_layer->layerId();
  19688. if (!isBatchedAtLevel(self, cur_level)) {
  19689. return at::_ops::multinomial::call(self, num_samples, replacement, generator);
  19690. }
  19691. Tensor self_value;
  19692. optional<int64_t> self_bdim;
  19693. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19694. auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator);
  19695. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19696. }
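// In-place variants (trailing underscore, e.g. lgamma_ below) use the
// "gen_vmap_inplace_plumbing" escape check and do not re-wrap anything: the batch
// rule is invoked for its side effect on the unwrapped self_value, which is presumably
// expected to be mutated in place, and the original `self` reference is returned.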
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & lgamma__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::lgamma_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
  19712. template <typename batch_rule_t, batch_rule_t batch_rule>
  19713. at::Tensor lgamma_generated_plumbing(const at::Tensor & self) {
  19714. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19715. auto maybe_layer = maybeCurrentDynamicLayer();
  19716. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19717. int64_t cur_level = maybe_layer->layerId();
  19718. if (!isBatchedAtLevel(self, cur_level)) {
  19719. return at::_ops::lgamma::call(self);
  19720. }
  19721. Tensor self_value;
  19722. optional<int64_t> self_bdim;
  19723. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19724. auto results = batch_rule(self_value, self_bdim);
  19725. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19726. }
  19727. template <typename batch_rule_t, batch_rule_t batch_rule>
  19728. at::Tensor digamma_generated_plumbing(const at::Tensor & self) {
  19729. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19730. auto maybe_layer = maybeCurrentDynamicLayer();
  19731. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19732. int64_t cur_level = maybe_layer->layerId();
  19733. if (!isBatchedAtLevel(self, cur_level)) {
  19734. return at::_ops::digamma::call(self);
  19735. }
  19736. Tensor self_value;
  19737. optional<int64_t> self_bdim;
  19738. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19739. auto results = batch_rule(self_value, self_bdim);
  19740. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19741. }
  19742. template <typename batch_rule_t, batch_rule_t batch_rule>
  19743. at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
  19744. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19745. auto maybe_layer = maybeCurrentDynamicLayer();
  19746. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19747. int64_t cur_level = maybe_layer->layerId();
  19748. if (!isBatchedAtLevel(self, cur_level)) {
  19749. return at::_ops::polygamma::call(n, self);
  19750. }
  19751. Tensor self_value;
  19752. optional<int64_t> self_bdim;
  19753. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19754. auto results = batch_rule(n, self_value, self_bdim);
  19755. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19756. }
  19757. template <typename batch_rule_t, batch_rule_t batch_rule>
  19758. at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
  19759. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19760. auto maybe_layer = maybeCurrentDynamicLayer();
  19761. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19762. int64_t cur_level = maybe_layer->layerId();
  19763. if (!isBatchedAtLevel(self, cur_level)) {
  19764. return at::_ops::polygamma_::call(self, n);
  19765. }
  19766. Tensor self_value;
  19767. optional<int64_t> self_bdim;
  19768. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19769. batch_rule(self_value, self_bdim, n);
  19770. return self;
  19771. }
  19772. template <typename batch_rule_t, batch_rule_t batch_rule>
  19773. at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
  19774. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19775. auto maybe_layer = maybeCurrentDynamicLayer();
  19776. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19777. int64_t cur_level = maybe_layer->layerId();
  19778. if (!isBatchedAtLevel(self, cur_level)) {
  19779. return at::_ops::erfinv::call(self);
  19780. }
  19781. Tensor self_value;
  19782. optional<int64_t> self_bdim;
  19783. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19784. auto results = batch_rule(self_value, self_bdim);
  19785. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19786. }
  19787. template <typename batch_rule_t, batch_rule_t batch_rule>
  19788. at::Tensor & erfinv__generated_plumbing(at::Tensor & self) {
  19789. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19790. auto maybe_layer = maybeCurrentDynamicLayer();
  19791. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19792. int64_t cur_level = maybe_layer->layerId();
  19793. if (!isBatchedAtLevel(self, cur_level)) {
  19794. return at::_ops::erfinv_::call(self);
  19795. }
  19796. Tensor self_value;
  19797. optional<int64_t> self_bdim;
  19798. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19799. batch_rule(self_value, self_bdim);
  19800. return self;
  19801. }
  19802. template <typename batch_rule_t, batch_rule_t batch_rule>
  19803. at::Tensor i0_generated_plumbing(const at::Tensor & self) {
  19804. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19805. auto maybe_layer = maybeCurrentDynamicLayer();
  19806. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19807. int64_t cur_level = maybe_layer->layerId();
  19808. if (!isBatchedAtLevel(self, cur_level)) {
  19809. return at::_ops::i0::call(self);
  19810. }
  19811. Tensor self_value;
  19812. optional<int64_t> self_bdim;
  19813. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19814. auto results = batch_rule(self_value, self_bdim);
  19815. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19816. }
  19817. template <typename batch_rule_t, batch_rule_t batch_rule>
  19818. at::Tensor & i0__generated_plumbing(at::Tensor & self) {
  19819. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19820. auto maybe_layer = maybeCurrentDynamicLayer();
  19821. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19822. int64_t cur_level = maybe_layer->layerId();
  19823. if (!isBatchedAtLevel(self, cur_level)) {
  19824. return at::_ops::i0_::call(self);
  19825. }
  19826. Tensor self_value;
  19827. optional<int64_t> self_bdim;
  19828. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19829. batch_rule(self_value, self_bdim);
  19830. return self;
  19831. }
  19832. template <typename batch_rule_t, batch_rule_t batch_rule>
  19833. at::Tensor sign_generated_plumbing(const at::Tensor & self) {
  19834. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19835. auto maybe_layer = maybeCurrentDynamicLayer();
  19836. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19837. int64_t cur_level = maybe_layer->layerId();
  19838. if (!isBatchedAtLevel(self, cur_level)) {
  19839. return at::_ops::sign::call(self);
  19840. }
  19841. Tensor self_value;
  19842. optional<int64_t> self_bdim;
  19843. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19844. auto results = batch_rule(self_value, self_bdim);
  19845. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19846. }
  19847. template <typename batch_rule_t, batch_rule_t batch_rule>
  19848. at::Tensor & sign__generated_plumbing(at::Tensor & self) {
  19849. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19850. auto maybe_layer = maybeCurrentDynamicLayer();
  19851. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19852. int64_t cur_level = maybe_layer->layerId();
  19853. if (!isBatchedAtLevel(self, cur_level)) {
  19854. return at::_ops::sign_::call(self);
  19855. }
  19856. Tensor self_value;
  19857. optional<int64_t> self_bdim;
  19858. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19859. batch_rule(self_value, self_bdim);
  19860. return self;
  19861. }
  19862. template <typename batch_rule_t, batch_rule_t batch_rule>
  19863. at::Tensor signbit_generated_plumbing(const at::Tensor & self) {
  19864. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19865. auto maybe_layer = maybeCurrentDynamicLayer();
  19866. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19867. int64_t cur_level = maybe_layer->layerId();
  19868. if (!isBatchedAtLevel(self, cur_level)) {
  19869. return at::_ops::signbit::call(self);
  19870. }
  19871. Tensor self_value;
  19872. optional<int64_t> self_bdim;
  19873. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19874. auto results = batch_rule(self_value, self_bdim);
  19875. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19876. }
  19877. template <typename batch_rule_t, batch_rule_t batch_rule>
  19878. at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
  19879. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19880. auto maybe_layer = maybeCurrentDynamicLayer();
  19881. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19882. int64_t cur_level = maybe_layer->layerId();
  19883. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  19884. return at::_ops::dist::call(self, other, p);
  19885. }
  19886. Tensor self_value;
  19887. optional<int64_t> self_bdim;
  19888. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19889. Tensor other_value;
  19890. optional<int64_t> other_bdim;
  19891. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  19892. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p);
  19893. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19894. }
  19895. template <typename batch_rule_t, batch_rule_t batch_rule>
  19896. at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  19897. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19898. auto maybe_layer = maybeCurrentDynamicLayer();
  19899. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19900. int64_t cur_level = maybe_layer->layerId();
  19901. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  19902. return at::_ops::atan2_::call(self, other);
  19903. }
  19904. Tensor self_value;
  19905. optional<int64_t> self_bdim;
  19906. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19907. Tensor other_value;
  19908. optional<int64_t> other_bdim;
  19909. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  19910. batch_rule(self_value, self_bdim, other_value, other_bdim);
  19911. return self;
  19912. }
  19913. template <typename batch_rule_t, batch_rule_t batch_rule>
  19914. at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  19915. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19916. auto maybe_layer = maybeCurrentDynamicLayer();
  19917. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19918. int64_t cur_level = maybe_layer->layerId();
  19919. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  19920. return at::_ops::atan2::call(self, other);
  19921. }
  19922. Tensor self_value;
  19923. optional<int64_t> self_bdim;
  19924. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19925. Tensor other_value;
  19926. optional<int64_t> other_bdim;
  19927. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  19928. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  19929. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19930. }
  19931. template <typename batch_rule_t, batch_rule_t batch_rule>
  19932. at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  19933. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19934. auto maybe_layer = maybeCurrentDynamicLayer();
  19935. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19936. int64_t cur_level = maybe_layer->layerId();
  19937. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  19938. return at::_ops::arctan2::call(self, other);
  19939. }
  19940. Tensor self_value;
  19941. optional<int64_t> self_bdim;
  19942. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19943. Tensor other_value;
  19944. optional<int64_t> other_bdim;
  19945. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  19946. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  19947. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19948. }
  19949. template <typename batch_rule_t, batch_rule_t batch_rule>
  19950. at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  19951. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19952. auto maybe_layer = maybeCurrentDynamicLayer();
  19953. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  19954. int64_t cur_level = maybe_layer->layerId();
  19955. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  19956. return at::_ops::arctan2_::call(self, other);
  19957. }
  19958. Tensor self_value;
  19959. optional<int64_t> self_bdim;
  19960. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19961. Tensor other_value;
  19962. optional<int64_t> other_bdim;
  19963. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  19964. batch_rule(self_value, self_bdim, other_value, other_bdim);
  19965. return self;
  19966. }
  19967. template <typename batch_rule_t, batch_rule_t batch_rule>
  19968. at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
  19969. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19970. auto maybe_layer = maybeCurrentDynamicLayer();
  19971. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19972. int64_t cur_level = maybe_layer->layerId();
  19973. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
  19974. return at::_ops::lerp_Scalar::call(self, end, weight);
  19975. }
  19976. Tensor self_value;
  19977. optional<int64_t> self_bdim;
  19978. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19979. Tensor end_value;
  19980. optional<int64_t> end_bdim;
  19981. std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
  19982. auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
  19983. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  19984. }
  19985. template <typename batch_rule_t, batch_rule_t batch_rule>
  19986. at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
  19987. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  19988. auto maybe_layer = maybeCurrentDynamicLayer();
  19989. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  19990. int64_t cur_level = maybe_layer->layerId();
  19991. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  19992. return at::_ops::lerp_Tensor::call(self, end, weight);
  19993. }
  19994. Tensor self_value;
  19995. optional<int64_t> self_bdim;
  19996. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  19997. Tensor end_value;
  19998. optional<int64_t> end_bdim;
  19999. std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
  20000. Tensor weight_value;
  20001. optional<int64_t> weight_bdim;
  20002. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  20003. auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
  20004. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20005. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::histc::call(self, bins, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, bins, min, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
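// Optional tensor arguments (such as `weight` in the histogram ops below) are only
// unwrapped when a value is actually present; otherwise weight_value and weight_bdim
// remain empty and are forwarded to the batch rule as nullopt.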
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor bins_value;
  optional<int64_t> bins_bdim;
  std::tie(bins_value, bins_bdim) = unwrapTensorAtLevel(bins, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
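// Ops that return a ::std::vector<at::Tensor> (e.g. _histogramdd_bin_edges below)
// re-wrap their outputs with makeBatchedVector instead of makeBatched; the histogramdd
// overloads further down combine both helpers for their (Tensor, vector<Tensor>) result.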
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
  20084. template <typename batch_rule_t, batch_rule_t batch_rule>
  20085. at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  20086. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20087. auto maybe_layer = maybeCurrentDynamicLayer();
  20088. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20089. int64_t cur_level = maybe_layer->layerId();
  20090. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  20091. return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
  20092. }
  20093. Tensor self_value;
  20094. optional<int64_t> self_bdim;
  20095. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20096. optional<Tensor> weight_value;
  20097. optional<int64_t> weight_bdim;
  20098. if (weight) {
  20099. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  20100. }
  20101. auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  20102. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20103. }
  20104. template <typename batch_rule_t, batch_rule_t batch_rule>
  20105. at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
  20106. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20107. auto maybe_layer = maybeCurrentDynamicLayer();
  20108. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20109. int64_t cur_level = maybe_layer->layerId();
  20110. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  20111. return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
  20112. }
  20113. Tensor self_value;
  20114. optional<int64_t> self_bdim;
  20115. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20116. optional<Tensor> weight_value;
  20117. optional<int64_t> weight_bdim;
  20118. if (weight) {
  20119. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  20120. }
  20121. auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density);
  20122. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20123. }
  20124. template <typename batch_rule_t, batch_rule_t batch_rule>
  20125. ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  20126. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20127. auto maybe_layer = maybeCurrentDynamicLayer();
  20128. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20129. int64_t cur_level = maybe_layer->layerId();
  20130. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  20131. return at::_ops::histogramdd::call(self, bins, range, weight, density);
  20132. }
  20133. Tensor self_value;
  20134. optional<int64_t> self_bdim;
  20135. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20136. optional<Tensor> weight_value;
  20137. optional<int64_t> weight_bdim;
  20138. if (weight) {
  20139. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  20140. }
  20141. auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  20142. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
  20143. }
  20144. template <typename batch_rule_t, batch_rule_t batch_rule>
  20145. ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  20146. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20147. auto maybe_layer = maybeCurrentDynamicLayer();
  20148. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20149. int64_t cur_level = maybe_layer->layerId();
  20150. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  20151. return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
  20152. }
  20153. Tensor self_value;
  20154. optional<int64_t> self_bdim;
  20155. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20156. optional<Tensor> weight_value;
  20157. optional<int64_t> weight_bdim;
  20158. if (weight) {
  20159. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  20160. }
  20161. auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  20162. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
  20163. }
  20164. template <typename batch_rule_t, batch_rule_t batch_rule>
  20165. ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
  20166. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20167. auto maybe_layer = maybeCurrentDynamicLayer();
  20168. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20169. int64_t cur_level = maybe_layer->layerId();
  20170. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  20171. return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
  20172. }
  20173. Tensor self_value;
  20174. optional<int64_t> self_bdim;
  20175. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20176. optional<Tensor> weight_value;
  20177. optional<int64_t> weight_bdim;
  20178. if (weight) {
  20179. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  20180. }
  20181. auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
  20182. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
  20183. }
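// Scalar-valued arguments (e.g. `other` in fmod_Scalar below) carry no batch dimension,
// so they are forwarded to the batch rule unchanged; only tensor arguments go through
// unwrapTensorAtLevel.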
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fmod_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  20199. template <typename batch_rule_t, batch_rule_t batch_rule>
  20200. at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  20201. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20202. auto maybe_layer = maybeCurrentDynamicLayer();
  20203. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20204. int64_t cur_level = maybe_layer->layerId();
  20205. if (!isBatchedAtLevel(self, cur_level)) {
  20206. return at::_ops::fmod__Scalar::call(self, other);
  20207. }
  20208. Tensor self_value;
  20209. optional<int64_t> self_bdim;
  20210. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20211. batch_rule(self_value, self_bdim, other);
  20212. return self;
  20213. }
  20214. template <typename batch_rule_t, batch_rule_t batch_rule>
  20215. at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20216. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20217. auto maybe_layer = maybeCurrentDynamicLayer();
  20218. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20219. int64_t cur_level = maybe_layer->layerId();
  20220. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20221. return at::_ops::fmod_Tensor::call(self, other);
  20222. }
  20223. Tensor self_value;
  20224. optional<int64_t> self_bdim;
  20225. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20226. Tensor other_value;
  20227. optional<int64_t> other_bdim;
  20228. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20229. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20230. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20231. }
  20232. template <typename batch_rule_t, batch_rule_t batch_rule>
  20233. at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  20234. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20235. auto maybe_layer = maybeCurrentDynamicLayer();
  20236. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20237. int64_t cur_level = maybe_layer->layerId();
  20238. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20239. return at::_ops::fmod__Tensor::call(self, other);
  20240. }
  20241. Tensor self_value;
  20242. optional<int64_t> self_bdim;
  20243. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20244. Tensor other_value;
  20245. optional<int64_t> other_bdim;
  20246. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20247. batch_rule(self_value, self_bdim, other_value, other_bdim);
  20248. return self;
  20249. }
  20250. template <typename batch_rule_t, batch_rule_t batch_rule>
  20251. at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20252. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20253. auto maybe_layer = maybeCurrentDynamicLayer();
  20254. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20255. int64_t cur_level = maybe_layer->layerId();
  20256. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20257. return at::_ops::hypot::call(self, other);
  20258. }
  20259. Tensor self_value;
  20260. optional<int64_t> self_bdim;
  20261. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20262. Tensor other_value;
  20263. optional<int64_t> other_bdim;
  20264. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20265. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20266. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20267. }
  20268. template <typename batch_rule_t, batch_rule_t batch_rule>
  20269. at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  20270. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20271. auto maybe_layer = maybeCurrentDynamicLayer();
  20272. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20273. int64_t cur_level = maybe_layer->layerId();
  20274. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20275. return at::_ops::hypot_::call(self, other);
  20276. }
  20277. Tensor self_value;
  20278. optional<int64_t> self_bdim;
  20279. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20280. Tensor other_value;
  20281. optional<int64_t> other_bdim;
  20282. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20283. batch_rule(self_value, self_bdim, other_value, other_bdim);
  20284. return self;
  20285. }
  20286. template <typename batch_rule_t, batch_rule_t batch_rule>
  20287. at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20288. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20289. auto maybe_layer = maybeCurrentDynamicLayer();
  20290. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20291. int64_t cur_level = maybe_layer->layerId();
  20292. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20293. return at::_ops::igamma::call(self, other);
  20294. }
  20295. Tensor self_value;
  20296. optional<int64_t> self_bdim;
  20297. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20298. Tensor other_value;
  20299. optional<int64_t> other_bdim;
  20300. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20301. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20302. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20303. }
  20304. template <typename batch_rule_t, batch_rule_t batch_rule>
  20305. at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  20306. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20307. auto maybe_layer = maybeCurrentDynamicLayer();
  20308. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20309. int64_t cur_level = maybe_layer->layerId();
  20310. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20311. return at::_ops::igamma_::call(self, other);
  20312. }
  20313. Tensor self_value;
  20314. optional<int64_t> self_bdim;
  20315. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20316. Tensor other_value;
  20317. optional<int64_t> other_bdim;
  20318. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20319. batch_rule(self_value, self_bdim, other_value, other_bdim);
  20320. return self;
  20321. }
  20322. template <typename batch_rule_t, batch_rule_t batch_rule>
  20323. at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20324. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20325. auto maybe_layer = maybeCurrentDynamicLayer();
  20326. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20327. int64_t cur_level = maybe_layer->layerId();
  20328. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20329. return at::_ops::igammac::call(self, other);
  20330. }
  20331. Tensor self_value;
  20332. optional<int64_t> self_bdim;
  20333. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20334. Tensor other_value;
  20335. optional<int64_t> other_bdim;
  20336. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20337. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20338. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20339. }
  20340. template <typename batch_rule_t, batch_rule_t batch_rule>
  20341. at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  20342. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20343. auto maybe_layer = maybeCurrentDynamicLayer();
  20344. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20345. int64_t cur_level = maybe_layer->layerId();
  20346. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20347. return at::_ops::igammac_::call(self, other);
  20348. }
  20349. Tensor self_value;
  20350. optional<int64_t> self_bdim;
  20351. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20352. Tensor other_value;
  20353. optional<int64_t> other_bdim;
  20354. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20355. batch_rule(self_value, self_bdim, other_value, other_bdim);
  20356. return self;
  20357. }
  20358. template <typename batch_rule_t, batch_rule_t batch_rule>
  20359. at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20360. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20361. auto maybe_layer = maybeCurrentDynamicLayer();
  20362. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20363. int64_t cur_level = maybe_layer->layerId();
  20364. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20365. return at::_ops::nextafter::call(self, other);
  20366. }
  20367. Tensor self_value;
  20368. optional<int64_t> self_bdim;
  20369. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20370. Tensor other_value;
  20371. optional<int64_t> other_bdim;
  20372. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20373. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20374. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20375. }
  20376. template <typename batch_rule_t, batch_rule_t batch_rule>
  20377. at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  20378. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20379. auto maybe_layer = maybeCurrentDynamicLayer();
  20380. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20381. int64_t cur_level = maybe_layer->layerId();
  20382. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20383. return at::_ops::nextafter_::call(self, other);
  20384. }
  20385. Tensor self_value;
  20386. optional<int64_t> self_bdim;
  20387. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20388. Tensor other_value;
  20389. optional<int64_t> other_bdim;
  20390. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20391. batch_rule(self_value, self_bdim, other_value, other_bdim);
  20392. return self;
  20393. }
  20394. template <typename batch_rule_t, batch_rule_t batch_rule>
  20395. at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  20396. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20397. auto maybe_layer = maybeCurrentDynamicLayer();
  20398. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20399. int64_t cur_level = maybe_layer->layerId();
  20400. if (!isBatchedAtLevel(self, cur_level)) {
  20401. return at::_ops::remainder_Scalar::call(self, other);
  20402. }
  20403. Tensor self_value;
  20404. optional<int64_t> self_bdim;
  20405. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20406. auto results = batch_rule(self_value, self_bdim, other);
  20407. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20408. }
  20409. template <typename batch_rule_t, batch_rule_t batch_rule>
  20410. at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  20411. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20412. auto maybe_layer = maybeCurrentDynamicLayer();
  20413. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20414. int64_t cur_level = maybe_layer->layerId();
  20415. if (!isBatchedAtLevel(self, cur_level)) {
  20416. return at::_ops::remainder__Scalar::call(self, other);
  20417. }
  20418. Tensor self_value;
  20419. optional<int64_t> self_bdim;
  20420. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20421. batch_rule(self_value, self_bdim, other);
  20422. return self;
  20423. }
  20424. template <typename batch_rule_t, batch_rule_t batch_rule>
  20425. at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20426. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20427. auto maybe_layer = maybeCurrentDynamicLayer();
  20428. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20429. int64_t cur_level = maybe_layer->layerId();
  20430. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20431. return at::_ops::remainder_Tensor::call(self, other);
  20432. }
  20433. Tensor self_value;
  20434. optional<int64_t> self_bdim;
  20435. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20436. Tensor other_value;
  20437. optional<int64_t> other_bdim;
  20438. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20439. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20440. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20441. }
  20442. template <typename batch_rule_t, batch_rule_t batch_rule>
  20443. at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  20444. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20445. auto maybe_layer = maybeCurrentDynamicLayer();
  20446. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20447. int64_t cur_level = maybe_layer->layerId();
  20448. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20449. return at::_ops::remainder__Tensor::call(self, other);
  20450. }
  20451. Tensor self_value;
  20452. optional<int64_t> self_bdim;
  20453. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20454. Tensor other_value;
  20455. optional<int64_t> other_bdim;
  20456. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20457. batch_rule(self_value, self_bdim, other_value, other_bdim);
  20458. return self;
  20459. }
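// For the Scalar-first overload below (Scalar self, Tensor other), the early-exit check
// and the unwrapping involve only `other`, since a plain Scalar can never be batched.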
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::remainder_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  20475. template <typename batch_rule_t, batch_rule_t batch_rule>
  20476. at::Tensor min_generated_plumbing(const at::Tensor & self) {
  20477. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20478. auto maybe_layer = maybeCurrentDynamicLayer();
  20479. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20480. int64_t cur_level = maybe_layer->layerId();
  20481. if (!isBatchedAtLevel(self, cur_level)) {
  20482. return at::_ops::min::call(self);
  20483. }
  20484. Tensor self_value;
  20485. optional<int64_t> self_bdim;
  20486. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20487. auto results = batch_rule(self_value, self_bdim);
  20488. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20489. }
  20490. template <typename batch_rule_t, batch_rule_t batch_rule>
  20491. at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20492. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20493. auto maybe_layer = maybeCurrentDynamicLayer();
  20494. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20495. int64_t cur_level = maybe_layer->layerId();
  20496. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20497. return at::_ops::fmin::call(self, other);
  20498. }
  20499. Tensor self_value;
  20500. optional<int64_t> self_bdim;
  20501. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20502. Tensor other_value;
  20503. optional<int64_t> other_bdim;
  20504. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20505. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20506. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20507. }
  20508. template <typename batch_rule_t, batch_rule_t batch_rule>
  20509. at::Tensor max_generated_plumbing(const at::Tensor & self) {
  20510. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20511. auto maybe_layer = maybeCurrentDynamicLayer();
  20512. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20513. int64_t cur_level = maybe_layer->layerId();
  20514. if (!isBatchedAtLevel(self, cur_level)) {
  20515. return at::_ops::max::call(self);
  20516. }
  20517. Tensor self_value;
  20518. optional<int64_t> self_bdim;
  20519. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20520. auto results = batch_rule(self_value, self_bdim);
  20521. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20522. }
  20523. template <typename batch_rule_t, batch_rule_t batch_rule>
  20524. at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20525. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20526. auto maybe_layer = maybeCurrentDynamicLayer();
  20527. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20528. int64_t cur_level = maybe_layer->layerId();
  20529. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20530. return at::_ops::fmax::call(self, other);
  20531. }
  20532. Tensor self_value;
  20533. optional<int64_t> self_bdim;
  20534. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20535. Tensor other_value;
  20536. optional<int64_t> other_bdim;
  20537. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20538. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20539. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20540. }
  20541. template <typename batch_rule_t, batch_rule_t batch_rule>
  20542. at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20543. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20544. auto maybe_layer = maybeCurrentDynamicLayer();
  20545. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20546. int64_t cur_level = maybe_layer->layerId();
  20547. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20548. return at::_ops::maximum::call(self, other);
  20549. }
  20550. Tensor self_value;
  20551. optional<int64_t> self_bdim;
  20552. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20553. Tensor other_value;
  20554. optional<int64_t> other_bdim;
  20555. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20556. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20557. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20558. }
  20559. template <typename batch_rule_t, batch_rule_t batch_rule>
  20560. at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20561. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20562. auto maybe_layer = maybeCurrentDynamicLayer();
  20563. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20564. int64_t cur_level = maybe_layer->layerId();
  20565. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20566. return at::_ops::max_other::call(self, other);
  20567. }
  20568. Tensor self_value;
  20569. optional<int64_t> self_bdim;
  20570. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20571. Tensor other_value;
  20572. optional<int64_t> other_bdim;
  20573. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20574. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20575. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20576. }
  20577. template <typename batch_rule_t, batch_rule_t batch_rule>
  20578. at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20579. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20580. auto maybe_layer = maybeCurrentDynamicLayer();
  20581. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20582. int64_t cur_level = maybe_layer->layerId();
  20583. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20584. return at::_ops::minimum::call(self, other);
  20585. }
  20586. Tensor self_value;
  20587. optional<int64_t> self_bdim;
  20588. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20589. Tensor other_value;
  20590. optional<int64_t> other_bdim;
  20591. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20592. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20593. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20594. }
  20595. template <typename batch_rule_t, batch_rule_t batch_rule>
  20596. at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  20597. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20598. auto maybe_layer = maybeCurrentDynamicLayer();
  20599. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20600. int64_t cur_level = maybe_layer->layerId();
  20601. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  20602. return at::_ops::min_other::call(self, other);
  20603. }
  20604. Tensor self_value;
  20605. optional<int64_t> self_bdim;
  20606. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20607. Tensor other_value;
  20608. optional<int64_t> other_bdim;
  20609. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  20610. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  20611. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20612. }
  20613. template <typename batch_rule_t, batch_rule_t batch_rule>
  20614. at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  20615. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20616. auto maybe_layer = maybeCurrentDynamicLayer();
  20617. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20618. int64_t cur_level = maybe_layer->layerId();
  20619. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
  20620. return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
  20621. }
  20622. Tensor self_value;
  20623. optional<int64_t> self_bdim;
  20624. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20625. Tensor q_value;
  20626. optional<int64_t> q_bdim;
  20627. std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
  20628. auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
  20629. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20630. }
  20631. template <typename batch_rule_t, batch_rule_t batch_rule>
  20632. at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  20633. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20634. auto maybe_layer = maybeCurrentDynamicLayer();
  20635. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20636. int64_t cur_level = maybe_layer->layerId();
  20637. if (!isBatchedAtLevel(self, cur_level)) {
  20638. return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
  20639. }
  20640. Tensor self_value;
  20641. optional<int64_t> self_bdim;
  20642. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20643. auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
  20644. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20645. }
  20646. template <typename batch_rule_t, batch_rule_t batch_rule>
  20647. at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  20648. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20649. auto maybe_layer = maybeCurrentDynamicLayer();
  20650. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20651. int64_t cur_level = maybe_layer->layerId();
  20652. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
  20653. return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
  20654. }
  20655. Tensor self_value;
  20656. optional<int64_t> self_bdim;
  20657. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20658. Tensor q_value;
  20659. optional<int64_t> q_bdim;
  20660. std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
  20661. auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
  20662. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20663. }
  20664. template <typename batch_rule_t, batch_rule_t batch_rule>
  20665. at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
  20666. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20667. auto maybe_layer = maybeCurrentDynamicLayer();
  20668. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20669. int64_t cur_level = maybe_layer->layerId();
  20670. if (!isBatchedAtLevel(self, cur_level)) {
  20671. return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
  20672. }
  20673. Tensor self_value;
  20674. optional<int64_t> self_bdim;
  20675. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20676. auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
  20677. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20678. }
  20679. template <typename batch_rule_t, batch_rule_t batch_rule>
  20680. ::std::tuple<at::Tensor,at::Tensor> sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
  20681. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20682. auto maybe_layer = maybeCurrentDynamicLayer();
  20683. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20684. int64_t cur_level = maybe_layer->layerId();
  20685. if (!isBatchedAtLevel(self, cur_level)) {
  20686. return at::_ops::sort::call(self, dim, descending);
  20687. }
  20688. Tensor self_value;
  20689. optional<int64_t> self_bdim;
  20690. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20691. auto results = batch_rule(self_value, self_bdim, dim, descending);
  20692. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  20693. }
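// Ops with multiple Tensor outputs (e.g. sort/topk) receive a flat tuple of
// (value, bdim) pairs from the batch rule; each pair is re-wrapped with
// makeBatched and the wrapped tensors are packed back into a std::tuple.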
  20694. template <typename batch_rule_t, batch_rule_t batch_rule>
  20695. ::std::tuple<at::Tensor,at::Tensor> sort_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
  20696. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20697. auto maybe_layer = maybeCurrentDynamicLayer();
  20698. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20699. int64_t cur_level = maybe_layer->layerId();
  20700. if (!isBatchedAtLevel(self, cur_level)) {
  20701. return at::_ops::sort_stable::call(self, stable, dim, descending);
  20702. }
  20703. Tensor self_value;
  20704. optional<int64_t> self_bdim;
  20705. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20706. auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
  20707. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  20708. }
  20709. template <typename batch_rule_t, batch_rule_t batch_rule>
  20710. ::std::tuple<at::Tensor,at::Tensor> sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
  20711. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20712. auto maybe_layer = maybeCurrentDynamicLayer();
  20713. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20714. int64_t cur_level = maybe_layer->layerId();
  20715. if (!isBatchedAtLevel(self, cur_level)) {
  20716. return at::_ops::sort_dimname::call(self, dim, descending);
  20717. }
  20718. Tensor self_value;
  20719. optional<int64_t> self_bdim;
  20720. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20721. auto results = batch_rule(self_value, self_bdim, dim, descending);
  20722. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  20723. }
  20724. template <typename batch_rule_t, batch_rule_t batch_rule>
  20725. ::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
  20726. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20727. auto maybe_layer = maybeCurrentDynamicLayer();
  20728. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20729. int64_t cur_level = maybe_layer->layerId();
  20730. if (!isBatchedAtLevel(self, cur_level)) {
  20731. return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
  20732. }
  20733. Tensor self_value;
  20734. optional<int64_t> self_bdim;
  20735. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20736. auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
  20737. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  20738. }
  20739. template <typename batch_rule_t, batch_rule_t batch_rule>
  20740. at::Tensor msort_generated_plumbing(const at::Tensor & self) {
  20741. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20742. auto maybe_layer = maybeCurrentDynamicLayer();
  20743. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20744. int64_t cur_level = maybe_layer->layerId();
  20745. if (!isBatchedAtLevel(self, cur_level)) {
  20746. return at::_ops::msort::call(self);
  20747. }
  20748. Tensor self_value;
  20749. optional<int64_t> self_bdim;
  20750. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20751. auto results = batch_rule(self_value, self_bdim);
  20752. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20753. }
  20754. template <typename batch_rule_t, batch_rule_t batch_rule>
  20755. at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
  20756. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20757. auto maybe_layer = maybeCurrentDynamicLayer();
  20758. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20759. int64_t cur_level = maybe_layer->layerId();
  20760. if (!isBatchedAtLevel(self, cur_level)) {
  20761. return at::_ops::argsort::call(self, dim, descending);
  20762. }
  20763. Tensor self_value;
  20764. optional<int64_t> self_bdim;
  20765. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20766. auto results = batch_rule(self_value, self_bdim, dim, descending);
  20767. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20768. }
  20769. template <typename batch_rule_t, batch_rule_t batch_rule>
  20770. at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
  20771. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20772. auto maybe_layer = maybeCurrentDynamicLayer();
  20773. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20774. int64_t cur_level = maybe_layer->layerId();
  20775. if (!isBatchedAtLevel(self, cur_level)) {
  20776. return at::_ops::argsort_stable::call(self, stable, dim, descending);
  20777. }
  20778. Tensor self_value;
  20779. optional<int64_t> self_bdim;
  20780. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20781. auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
  20782. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20783. }
  20784. template <typename batch_rule_t, batch_rule_t batch_rule>
  20785. at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
  20786. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20787. auto maybe_layer = maybeCurrentDynamicLayer();
  20788. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20789. int64_t cur_level = maybe_layer->layerId();
  20790. if (!isBatchedAtLevel(self, cur_level)) {
  20791. return at::_ops::argsort_dimname::call(self, dim, descending);
  20792. }
  20793. Tensor self_value;
  20794. optional<int64_t> self_bdim;
  20795. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20796. auto results = batch_rule(self_value, self_bdim, dim, descending);
  20797. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20798. }
  20799. template <typename batch_rule_t, batch_rule_t batch_rule>
  20800. ::std::tuple<at::Tensor,at::Tensor> topk_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
  20801. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20802. auto maybe_layer = maybeCurrentDynamicLayer();
  20803. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20804. int64_t cur_level = maybe_layer->layerId();
  20805. if (!isBatchedAtLevel(self, cur_level)) {
  20806. return at::_ops::topk::call(self, k, dim, largest, sorted);
  20807. }
  20808. Tensor self_value;
  20809. optional<int64_t> self_bdim;
  20810. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20811. auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted);
  20812. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  20813. }
  20814. template <typename batch_rule_t, batch_rule_t batch_rule>
  20815. at::Tensor all_generated_plumbing(const at::Tensor & self) {
  20816. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20817. auto maybe_layer = maybeCurrentDynamicLayer();
  20818. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20819. int64_t cur_level = maybe_layer->layerId();
  20820. if (!isBatchedAtLevel(self, cur_level)) {
  20821. return at::_ops::all::call(self);
  20822. }
  20823. Tensor self_value;
  20824. optional<int64_t> self_bdim;
  20825. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20826. auto results = batch_rule(self_value, self_bdim);
  20827. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20828. }
  20829. template <typename batch_rule_t, batch_rule_t batch_rule>
  20830. at::Tensor any_generated_plumbing(const at::Tensor & self) {
  20831. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20832. auto maybe_layer = maybeCurrentDynamicLayer();
  20833. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20834. int64_t cur_level = maybe_layer->layerId();
  20835. if (!isBatchedAtLevel(self, cur_level)) {
  20836. return at::_ops::any::call(self);
  20837. }
  20838. Tensor self_value;
  20839. optional<int64_t> self_bdim;
  20840. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20841. auto results = batch_rule(self_value, self_bdim);
  20842. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20843. }
  20844. template <typename batch_rule_t, batch_rule_t batch_rule>
  20845. at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
  20846. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20847. auto maybe_layer = maybeCurrentDynamicLayer();
  20848. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20849. int64_t cur_level = maybe_layer->layerId();
  20850. if (!isBatchedAtLevel(self, cur_level)) {
  20851. return at::_ops::renorm::call(self, p, dim, maxnorm);
  20852. }
  20853. Tensor self_value;
  20854. optional<int64_t> self_bdim;
  20855. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20856. auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm);
  20857. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20858. }
  20859. template <typename batch_rule_t, batch_rule_t batch_rule>
  20860. at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
  20861. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20862. auto maybe_layer = maybeCurrentDynamicLayer();
  20863. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20864. int64_t cur_level = maybe_layer->layerId();
  20865. if (!isBatchedAtLevel(self, cur_level)) {
  20866. return at::_ops::renorm_::call(self, p, dim, maxnorm);
  20867. }
  20868. Tensor self_value;
  20869. optional<int64_t> self_bdim;
  20870. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20871. batch_rule(self_value, self_bdim, p, dim, maxnorm);
  20872. return self;
  20873. }
  20874. template <typename batch_rule_t, batch_rule_t batch_rule>
  20875. at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  20876. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20877. auto maybe_layer = maybeCurrentDynamicLayer();
  20878. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20879. int64_t cur_level = maybe_layer->layerId();
  20880. if (!isBatchedAtLevel(self, cur_level)) {
  20881. return at::_ops::unfold::call(self, dimension, size, step);
  20882. }
  20883. Tensor self_value;
  20884. optional<int64_t> self_bdim;
  20885. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20886. auto results = batch_rule(self_value, self_bdim, dimension, size, step);
  20887. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20888. }
  20889. template <typename batch_rule_t, batch_rule_t batch_rule>
  20890. at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
  20891. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20892. auto maybe_layer = maybeCurrentDynamicLayer();
  20893. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20894. int64_t cur_level = maybe_layer->layerId();
  20895. if (!isBatchedAtLevel(grad_in, cur_level)) {
  20896. return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
  20897. }
  20898. Tensor grad_in_value;
  20899. optional<int64_t> grad_in_bdim;
  20900. std::tie(grad_in_value, grad_in_bdim) = unwrapTensorAtLevel(grad_in, cur_level);
  20901. auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step);
  20902. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20903. }
  20904. template <typename batch_rule_t, batch_rule_t batch_rule>
  20905. at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
  20906. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20907. auto maybe_layer = maybeCurrentDynamicLayer();
  20908. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20909. int64_t cur_level = maybe_layer->layerId();
  20910. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
  20911. return at::_ops::pow_Tensor_Tensor::call(self, exponent);
  20912. }
  20913. Tensor self_value;
  20914. optional<int64_t> self_bdim;
  20915. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20916. Tensor exponent_value;
  20917. optional<int64_t> exponent_bdim;
  20918. std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  20919. auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  20920. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20921. }
  20922. template <typename batch_rule_t, batch_rule_t batch_rule>
  20923. at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
  20924. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20925. auto maybe_layer = maybeCurrentDynamicLayer();
  20926. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20927. int64_t cur_level = maybe_layer->layerId();
  20928. if (!isBatchedAtLevel(exponent, cur_level)) {
  20929. return at::_ops::pow_Scalar::call(self, exponent);
  20930. }
  20931. Tensor exponent_value;
  20932. optional<int64_t> exponent_bdim;
  20933. std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  20934. auto results = batch_rule(self, exponent_value, exponent_bdim);
  20935. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20936. }
  20937. template <typename batch_rule_t, batch_rule_t batch_rule>
  20938. at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
  20939. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20940. auto maybe_layer = maybeCurrentDynamicLayer();
  20941. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20942. int64_t cur_level = maybe_layer->layerId();
  20943. if (!isBatchedAtLevel(self, cur_level)) {
  20944. return at::_ops::pow_Tensor_Scalar::call(self, exponent);
  20945. }
  20946. Tensor self_value;
  20947. optional<int64_t> self_bdim;
  20948. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20949. auto results = batch_rule(self_value, self_bdim, exponent);
  20950. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  20951. }
  20952. template <typename batch_rule_t, batch_rule_t batch_rule>
  20953. at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
  20954. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20955. auto maybe_layer = maybeCurrentDynamicLayer();
  20956. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20957. int64_t cur_level = maybe_layer->layerId();
  20958. if (!isBatchedAtLevel(self, cur_level)) {
  20959. return at::_ops::pow__Scalar::call(self, exponent);
  20960. }
  20961. Tensor self_value;
  20962. optional<int64_t> self_bdim;
  20963. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20964. batch_rule(self_value, self_bdim, exponent);
  20965. return self;
  20966. }
  20967. template <typename batch_rule_t, batch_rule_t batch_rule>
  20968. at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
  20969. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20970. auto maybe_layer = maybeCurrentDynamicLayer();
  20971. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  20972. int64_t cur_level = maybe_layer->layerId();
  20973. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
  20974. return at::_ops::pow__Tensor::call(self, exponent);
  20975. }
  20976. Tensor self_value;
  20977. optional<int64_t> self_bdim;
  20978. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20979. Tensor exponent_value;
  20980. optional<int64_t> exponent_bdim;
  20981. std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  20982. batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  20983. return self;
  20984. }
  20985. template <typename batch_rule_t, batch_rule_t batch_rule>
  20986. at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
  20987. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  20988. auto maybe_layer = maybeCurrentDynamicLayer();
  20989. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  20990. int64_t cur_level = maybe_layer->layerId();
  20991. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
  20992. return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
  20993. }
  20994. Tensor self_value;
  20995. optional<int64_t> self_bdim;
  20996. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  20997. Tensor exponent_value;
  20998. optional<int64_t> exponent_bdim;
  20999. std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  21000. auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  21001. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21002. }
  21003. template <typename batch_rule_t, batch_rule_t batch_rule>
  21004. at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
  21005. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21006. auto maybe_layer = maybeCurrentDynamicLayer();
  21007. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21008. int64_t cur_level = maybe_layer->layerId();
  21009. if (!isBatchedAtLevel(exponent, cur_level)) {
  21010. return at::_ops::float_power_Scalar::call(self, exponent);
  21011. }
  21012. Tensor exponent_value;
  21013. optional<int64_t> exponent_bdim;
  21014. std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  21015. auto results = batch_rule(self, exponent_value, exponent_bdim);
  21016. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21017. }
  21018. template <typename batch_rule_t, batch_rule_t batch_rule>
  21019. at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
  21020. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21021. auto maybe_layer = maybeCurrentDynamicLayer();
  21022. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21023. int64_t cur_level = maybe_layer->layerId();
  21024. if (!isBatchedAtLevel(self, cur_level)) {
  21025. return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
  21026. }
  21027. Tensor self_value;
  21028. optional<int64_t> self_bdim;
  21029. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  21030. auto results = batch_rule(self_value, self_bdim, exponent);
  21031. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21032. }
  21033. template <typename batch_rule_t, batch_rule_t batch_rule>
  21034. at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
  21035. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21036. auto maybe_layer = maybeCurrentDynamicLayer();
  21037. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  21038. int64_t cur_level = maybe_layer->layerId();
  21039. if (!isBatchedAtLevel(self, cur_level)) {
  21040. return at::_ops::float_power__Scalar::call(self, exponent);
  21041. }
  21042. Tensor self_value;
  21043. optional<int64_t> self_bdim;
  21044. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  21045. batch_rule(self_value, self_bdim, exponent);
  21046. return self;
  21047. }
  21048. template <typename batch_rule_t, batch_rule_t batch_rule>
  21049. at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
  21050. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21051. auto maybe_layer = maybeCurrentDynamicLayer();
  21052. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  21053. int64_t cur_level = maybe_layer->layerId();
  21054. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
  21055. return at::_ops::float_power__Tensor::call(self, exponent);
  21056. }
  21057. Tensor self_value;
  21058. optional<int64_t> self_bdim;
  21059. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  21060. Tensor exponent_value;
  21061. optional<int64_t> exponent_bdim;
  21062. std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  21063. batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  21064. return self;
  21065. }
  21066. template <typename batch_rule_t, batch_rule_t batch_rule>
  21067. at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  21068. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21069. auto maybe_layer = maybeCurrentDynamicLayer();
  21070. vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  21071. int64_t cur_level = maybe_layer->layerId();
  21072. if (!isBatchedAtLevel(self, cur_level)) {
  21073. return at::_ops::normal_::call(self, mean, std, generator);
  21074. }
  21075. Tensor self_value;
  21076. optional<int64_t> self_bdim;
  21077. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  21078. batch_rule(self_value, self_bdim, mean, std, generator);
  21079. return self;
  21080. }
  21081. template <typename batch_rule_t, batch_rule_t batch_rule>
  21082. at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  21083. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21084. auto maybe_layer = maybeCurrentDynamicLayer();
  21085. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21086. int64_t cur_level = maybe_layer->layerId();
  21087. if (!isBatchedAtLevel(self, cur_level)) {
  21088. return at::_ops::normal_functional::call(self, mean, std, generator);
  21089. }
  21090. Tensor self_value;
  21091. optional<int64_t> self_bdim;
  21092. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  21093. auto results = batch_rule(self_value, self_bdim, mean, std, generator);
  21094. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21095. }
  21096. template <typename batch_rule_t, batch_rule_t batch_rule>
  21097. at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
  21098. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21099. auto maybe_layer = maybeCurrentDynamicLayer();
  21100. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21101. int64_t cur_level = maybe_layer->layerId();
  21102. if (!isBatchedAtLevel(mean, cur_level)) {
  21103. return at::_ops::normal_Tensor_float::call(mean, std, generator);
  21104. }
  21105. Tensor mean_value;
  21106. optional<int64_t> mean_bdim;
  21107. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  21108. auto results = batch_rule(mean_value, mean_bdim, std, generator);
  21109. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21110. }
  21111. template <typename batch_rule_t, batch_rule_t batch_rule>
  21112. at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
  21113. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21114. auto maybe_layer = maybeCurrentDynamicLayer();
  21115. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21116. int64_t cur_level = maybe_layer->layerId();
  21117. if (!isBatchedAtLevel(std, cur_level)) {
  21118. return at::_ops::normal_float_Tensor::call(mean, std, generator);
  21119. }
  21120. Tensor std_value;
  21121. optional<int64_t> std_bdim;
  21122. std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
  21123. auto results = batch_rule(mean, std_value, std_bdim, generator);
  21124. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21125. }
  21126. template <typename batch_rule_t, batch_rule_t batch_rule>
  21127. at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
  21128. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21129. auto maybe_layer = maybeCurrentDynamicLayer();
  21130. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21131. int64_t cur_level = maybe_layer->layerId();
  21132. if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) {
  21133. return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
  21134. }
  21135. Tensor mean_value;
  21136. optional<int64_t> mean_bdim;
  21137. std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  21138. Tensor std_value;
  21139. optional<int64_t> std_bdim;
  21140. std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
  21141. auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator);
  21142. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21143. }
  21144. template <typename batch_rule_t, batch_rule_t batch_rule>
  21145. at::Tensor alias_generated_plumbing(const at::Tensor & self) {
  21146. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21147. auto maybe_layer = maybeCurrentDynamicLayer();
  21148. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21149. int64_t cur_level = maybe_layer->layerId();
  21150. if (!isBatchedAtLevel(self, cur_level)) {
  21151. return at::_ops::alias::call(self);
  21152. }
  21153. Tensor self_value;
  21154. optional<int64_t> self_bdim;
  21155. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  21156. auto results = batch_rule(self_value, self_bdim);
  21157. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  21158. }
  21159. template <typename batch_rule_t, batch_rule_t batch_rule>
  21160. void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
  21161. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21162. auto maybe_layer = maybeCurrentDynamicLayer();
  21163. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21164. int64_t cur_level = maybe_layer->layerId();
  21165. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
  21166. return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
  21167. }
  21168. Tensor found_inf_value;
  21169. optional<int64_t> found_inf_bdim;
  21170. std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
  21171. Tensor inv_scale_value;
  21172. optional<int64_t> inv_scale_bdim;
  21173. std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
  21174. batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
  21175. }
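// Void-returning ops use the "gen_vmap_plumbing_no_returns" escape check and
// simply forward the unwrapped arguments to the batch rule; there is nothing
// to re-wrap.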
  21176. template <typename batch_rule_t, batch_rule_t batch_rule>
  21177. ::std::vector<at::Tensor> _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21178. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21179. auto maybe_layer = maybeCurrentDynamicLayer();
  21180. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21181. int64_t cur_level = maybe_layer->layerId();
  21182. if (!isBatchedAtLevel(self, cur_level)) {
  21183. return at::_ops::_foreach_add_Scalar::call(self, scalar);
  21184. }
  21185. auto results = batch_rule(self, scalar);
  21186. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21187. }
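// _foreach ops take TensorList arguments, which are forwarded to the batch rule
// without per-tensor unwrapping; their list-valued results are re-wrapped with
// makeBatchedVector instead of makeBatched.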
  21188. template <typename batch_rule_t, batch_rule_t batch_rule>
  21189. void _foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21190. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21191. auto maybe_layer = maybeCurrentDynamicLayer();
  21192. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21193. int64_t cur_level = maybe_layer->layerId();
  21194. if (!isBatchedAtLevel(self, cur_level)) {
  21195. return at::_ops::_foreach_add__Scalar::call(self, scalar);
  21196. }
  21197. batch_rule(self, scalar);
  21198. }
  21199. template <typename batch_rule_t, batch_rule_t batch_rule>
  21200. ::std::vector<at::Tensor> _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21201. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21202. auto maybe_layer = maybeCurrentDynamicLayer();
  21203. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21204. int64_t cur_level = maybe_layer->layerId();
  21205. if (!isBatchedAtLevel(self, cur_level)) {
  21206. return at::_ops::_foreach_sub_Scalar::call(self, scalar);
  21207. }
  21208. auto results = batch_rule(self, scalar);
  21209. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21210. }
  21211. template <typename batch_rule_t, batch_rule_t batch_rule>
  21212. void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21213. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21214. auto maybe_layer = maybeCurrentDynamicLayer();
  21215. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21216. int64_t cur_level = maybe_layer->layerId();
  21217. if (!isBatchedAtLevel(self, cur_level)) {
  21218. return at::_ops::_foreach_sub__Scalar::call(self, scalar);
  21219. }
  21220. batch_rule(self, scalar);
  21221. }
  21222. template <typename batch_rule_t, batch_rule_t batch_rule>
  21223. ::std::vector<at::Tensor> _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21224. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21225. auto maybe_layer = maybeCurrentDynamicLayer();
  21226. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21227. int64_t cur_level = maybe_layer->layerId();
  21228. if (!isBatchedAtLevel(self, cur_level)) {
  21229. return at::_ops::_foreach_mul_Scalar::call(self, scalar);
  21230. }
  21231. auto results = batch_rule(self, scalar);
  21232. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21233. }
  21234. template <typename batch_rule_t, batch_rule_t batch_rule>
  21235. void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21236. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21237. auto maybe_layer = maybeCurrentDynamicLayer();
  21238. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21239. int64_t cur_level = maybe_layer->layerId();
  21240. if (!isBatchedAtLevel(self, cur_level)) {
  21241. return at::_ops::_foreach_mul__Scalar::call(self, scalar);
  21242. }
  21243. batch_rule(self, scalar);
  21244. }
  21245. template <typename batch_rule_t, batch_rule_t batch_rule>
  21246. ::std::vector<at::Tensor> _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21247. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21248. auto maybe_layer = maybeCurrentDynamicLayer();
  21249. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21250. int64_t cur_level = maybe_layer->layerId();
  21251. if (!isBatchedAtLevel(self, cur_level)) {
  21252. return at::_ops::_foreach_div_Scalar::call(self, scalar);
  21253. }
  21254. auto results = batch_rule(self, scalar);
  21255. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21256. }
  21257. template <typename batch_rule_t, batch_rule_t batch_rule>
  21258. void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21259. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21260. auto maybe_layer = maybeCurrentDynamicLayer();
  21261. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21262. int64_t cur_level = maybe_layer->layerId();
  21263. if (!isBatchedAtLevel(self, cur_level)) {
  21264. return at::_ops::_foreach_div__Scalar::call(self, scalar);
  21265. }
  21266. batch_rule(self, scalar);
  21267. }
  21268. template <typename batch_rule_t, batch_rule_t batch_rule>
  21269. ::std::vector<at::Tensor> _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21270. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21271. auto maybe_layer = maybeCurrentDynamicLayer();
  21272. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21273. int64_t cur_level = maybe_layer->layerId();
  21274. if (!isBatchedAtLevel(self, cur_level)) {
  21275. return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
  21276. }
  21277. auto results = batch_rule(self, scalar);
  21278. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21279. }
  21280. template <typename batch_rule_t, batch_rule_t batch_rule>
  21281. void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21282. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21283. auto maybe_layer = maybeCurrentDynamicLayer();
  21284. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21285. int64_t cur_level = maybe_layer->layerId();
  21286. if (!isBatchedAtLevel(self, cur_level)) {
  21287. return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
  21288. }
  21289. batch_rule(self, scalar);
  21290. }
  21291. template <typename batch_rule_t, batch_rule_t batch_rule>
  21292. ::std::vector<at::Tensor> _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21293. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21294. auto maybe_layer = maybeCurrentDynamicLayer();
  21295. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21296. int64_t cur_level = maybe_layer->layerId();
  21297. if (!isBatchedAtLevel(self, cur_level)) {
  21298. return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
  21299. }
  21300. auto results = batch_rule(self, scalar);
  21301. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21302. }
  21303. template <typename batch_rule_t, batch_rule_t batch_rule>
  21304. void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21305. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21306. auto maybe_layer = maybeCurrentDynamicLayer();
  21307. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21308. int64_t cur_level = maybe_layer->layerId();
  21309. if (!isBatchedAtLevel(self, cur_level)) {
  21310. return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
  21311. }
  21312. batch_rule(self, scalar);
  21313. }
  21314. template <typename batch_rule_t, batch_rule_t batch_rule>
  21315. ::std::vector<at::Tensor> _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21316. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21317. auto maybe_layer = maybeCurrentDynamicLayer();
  21318. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21319. int64_t cur_level = maybe_layer->layerId();
  21320. if (!isBatchedAtLevel(self, cur_level)) {
  21321. return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
  21322. }
  21323. auto results = batch_rule(self, scalar);
  21324. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21325. }
  21326. template <typename batch_rule_t, batch_rule_t batch_rule>
  21327. void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21328. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21329. auto maybe_layer = maybeCurrentDynamicLayer();
  21330. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21331. int64_t cur_level = maybe_layer->layerId();
  21332. if (!isBatchedAtLevel(self, cur_level)) {
  21333. return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
  21334. }
  21335. batch_rule(self, scalar);
  21336. }
  21337. template <typename batch_rule_t, batch_rule_t batch_rule>
  21338. ::std::vector<at::Tensor> _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21339. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21340. auto maybe_layer = maybeCurrentDynamicLayer();
  21341. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  21342. int64_t cur_level = maybe_layer->layerId();
  21343. if (!isBatchedAtLevel(self, cur_level)) {
  21344. return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
  21345. }
  21346. auto results = batch_rule(self, scalar);
  21347. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  21348. }
  21349. template <typename batch_rule_t, batch_rule_t batch_rule>
  21350. void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  21351. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  21352. auto maybe_layer = maybeCurrentDynamicLayer();
  21353. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  21354. int64_t cur_level = maybe_layer->layerId();
  21355. if (!isBatchedAtLevel(self, cur_level)) {
  21356. return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
  21357. }
  21358. batch_rule(self, scalar);
  21359. }
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_add_List::call(self, other, alpha);
  }
  auto results = batch_rule(self, other, alpha);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_add__List::call(self, other, alpha);
  }
  batch_rule(self, other, alpha);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_sub_List::call(self, other, alpha);
  }
  auto results = batch_rule(self, other, alpha);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_sub__List::call(self, other, alpha);
  }
  batch_rule(self, other, alpha);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_mul_List::call(self, other);
  }
  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_mul__List::call(self, other);
  }
  batch_rule(self, other);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_div_List::call(self, other);
  }
  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_div__List::call(self, other);
  }
  batch_rule(self, other);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_clamp_min_List::call(self, other);
  }
  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_clamp_min__List::call(self, other);
  }
  batch_rule(self, other);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_clamp_max_List::call(self, other);
  }
  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_clamp_max__List::call(self, other);
  }
  batch_rule(self, other);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_maximum_List::call(self, other);
  }
  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_maximum__List::call(self, other);
  }
  batch_rule(self, other);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_minimum_List::call(self, other);
  }
  auto results = batch_rule(self, other);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_foreach_minimum__List::call(self, other);
  }
  batch_rule(self, other);
}
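// ScalarList variants: same plumbing, but the second operand is an ArrayRef of Scalars
// rather than a TensorList, so only `self` is checked for batching at the current level.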
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_add_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_add__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_div_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_div__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
  }
  auto results = batch_rule(self, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
  }
  batch_rule(self, scalars);
}
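// Unary _foreach_* ops (exp, sqrt, abs, the trig and log families, rounding, etc.).
// Each has an out-of-place variant that re-wraps the batch rule's outputs via
// makeBatchedVector and an in-place "_" variant that simply forwards to the batch rule;
// _foreach_zero_ exists only as an in-place op.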
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_exp_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_exp::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_zero__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_zero_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_exp__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_exp_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_sqrt_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sqrt::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sqrt__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sqrt_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_abs_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_abs::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_abs__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_abs_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_acos_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_acos::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_acos__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_acos_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_asin_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_asin::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_asin__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_asin_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_atan_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_atan::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_atan__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_atan_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_ceil_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_ceil::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_ceil__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_ceil_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_cos_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cos::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cos__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cos_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_cosh_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cosh::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cosh__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_cosh_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_erf_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erf::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erf__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erf_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_erfc_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erfc::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erfc__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_erfc_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_expm1_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_expm1::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_expm1__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_expm1_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_floor_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_floor::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_floor__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_floor_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_log_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_log10_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log10::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log10__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log10_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_log1p_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log1p::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log1p__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log1p_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_log2_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log2::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log2__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_log2_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_neg_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_neg::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_neg__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_neg_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_tan_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_tan::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_tan__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_tan_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_tanh_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_tanh::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_tanh__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_tanh_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_sin_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sin::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sin__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sin_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_sinh_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sinh::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sinh__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sinh_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_round_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_round::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_round__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_round_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_lgamma_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_lgamma::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lgamma__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_lgamma_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_frac_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_frac::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_frac__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_frac_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_reciprocal_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_reciprocal::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_reciprocal__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_reciprocal_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_sigmoid_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sigmoid::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sigmoid__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_sigmoid_::call(self);
  }
  batch_rule(self);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_trunc_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_trunc::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_trunc__generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_trunc_::call(self);
  }
  batch_rule(self);
}
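// Pointwise ternary ops: _foreach_addcdiv / _foreach_addcmul over (self, tensor1, tensor2),
// with the multiplier given as a Scalar, a ScalarList, or a Tensor. The batched-input check
// covers all three tensor lists before falling through or dispatching to the batch rule.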
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
  }
  batch_rule(self, tensor1, tensor2, value);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
  }
  batch_rule(self, tensor1, tensor2, value);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
  }
  batch_rule(self, tensor1, tensor2, scalars);
}
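// The "_Tensor" overloads below take the per-element scalars as a Tensor, which may itself
// be batched. That tensor is therefore unwrapped into a plain value plus an optional batch
// dimension via unwrapTensorAtLevel before being passed to the batch rule.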
  22416. template <typename batch_rule_t, batch_rule_t batch_rule>
  22417. void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  22418. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  22419. auto maybe_layer = maybeCurrentDynamicLayer();
  22420. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  22421. int64_t cur_level = maybe_layer->layerId();
  22422. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
  22423. return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
  22424. }
  22425. Tensor scalars_value;
  22426. optional<int64_t> scalars_bdim;
  22427. std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  22428. batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
  22429. }
  22430. template <typename batch_rule_t, batch_rule_t batch_rule>
  22431. void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  22432. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  22433. auto maybe_layer = maybeCurrentDynamicLayer();
  22434. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  22435. int64_t cur_level = maybe_layer->layerId();
  22436. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
  22437. return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
  22438. }
  22439. batch_rule(self, tensor1, tensor2, scalars);
  22440. }
  22441. template <typename batch_rule_t, batch_rule_t batch_rule>
  22442. void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  22443. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  22444. auto maybe_layer = maybeCurrentDynamicLayer();
  22445. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  22446. int64_t cur_level = maybe_layer->layerId();
  22447. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
  22448. return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
  22449. }
  22450. Tensor scalars_value;
  22451. optional<int64_t> scalars_bdim;
  22452. std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  22453. batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
  22454. }
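// Note on the two flavours of foreach plumbing: the in-place wrappers above come
// from the "gen_vmap_plumbing_no_returns" template; they pass the tensor lists
// through to batch_rule unchanged (only loose Tensor arguments such as `scalars`
// are unwrapped) and return nothing, since the op mutates `self` in place. The
// out-of-place wrappers below additionally re-wrap the (tensors, batch dims)
// returned by batch_rule at the current vmap level via makeBatchedVector.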
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
  }
  auto results = batch_rule(self, tensor1, tensor2, value);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
  }
  auto results = batch_rule(self, tensor1, tensor2, value);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
  }
  auto results = batch_rule(self, tensor1, tensor2, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
    return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
  }
  Tensor scalars_value;
  optional<int64_t> scalars_bdim;
  std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
    return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
  }
  auto results = batch_rule(self, tensor1, tensor2, scalars);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
    return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
  }
  Tensor scalars_value;
  optional<int64_t> scalars_bdim;
  std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & ord) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_norm_Scalar::call(self, ord);
  }
  auto results = batch_rule(self, ord);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
    return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
  }
  auto results = batch_rule(self, tensors1, weights);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
    return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
  }
  batch_rule(self, tensors1, weights);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
    return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
  }
  auto results = batch_rule(self, tensors1, weight);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
    return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
  }
  batch_rule(self, tensors1, weight);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) {
    return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor boundaries_value;
  optional<int64_t> boundaries_bdim;
  std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
  auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(boundaries, cur_level)) {
    return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
  }
  Tensor boundaries_value;
  optional<int64_t> boundaries_bdim;
  std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
  auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
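// For Scalar-input overloads such as bucketize.Scalar, only the tensor arguments
// participate in the isBatchedAtLevel check and get unwrapped; the Scalar itself
// is forwarded to the batch rule untouched.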
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
    return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
  }
  Tensor sorted_sequence_value;
  optional<int64_t> sorted_sequence_bdim;
  std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> sorter_value;
  optional<int64_t> sorter_bdim;
  if (sorter) {
    std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
  }
  auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
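// Optional tensor arguments (the `sorter` above, `weight` in the loss ops below)
// are unwrapped only when present; otherwise the batch rule receives empty
// optionals for both the value and its batch dimension.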
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
    return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
  }
  Tensor sorted_sequence_value;
  optional<int64_t> sorted_sequence_bdim;
  std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
  optional<Tensor> sorter_value;
  optional<int64_t> sorter_bdim;
  if (sorter) {
    std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
  }
  auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, out_int32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) {
    return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
  }
  Tensor crow_indices_value;
  optional<int64_t> crow_indices_bdim;
  std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
  Tensor col_indices_value;
  optional<int64_t> col_indices_bdim;
  std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::mse_loss::call(self, target, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
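// Illustrative sketch only (hypothetical name, not the functorch implementation):
// a batch rule compatible with mse_loss_generated_plumbing receives each tensor
// as a (value, optional batch-dim) pair and reports the batch dim of its output,
// i.e. it has the shape
//
//   std::tuple<at::Tensor, optional<int64_t>> mse_loss_batch_rule(
//       const at::Tensor& self, optional<int64_t> self_bdim,
//       const at::Tensor& target, optional<int64_t> target_bdim,
//       int64_t reduction);
//
// Instantiating mse_loss_generated_plumbing<decltype(&mse_loss_batch_rule),
// &mse_loss_batch_rule> then yields the kernel that can be registered for the
// FuncTorchBatched dispatch key.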
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::l1_loss::call(self, target, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::multilabel_margin_loss::call(self, target, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
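// Multi-output ops (multilabel_margin_loss_forward above, the nll_loss*_forward
// wrappers below) expect batch_rule to return a flat tuple of alternating
// (value, batch dim) entries; each pair is re-wrapped with makeBatched before
// the final tuple is assembled.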
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(is_target, cur_level)) {
    return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  Tensor is_target_value;
  optional<int64_t> is_target_bdim;
  std::tie(is_target_value, is_target_bdim) = unwrapTensorAtLevel(is_target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
    return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  Tensor total_weight_value;
  optional<int64_t> total_weight_bdim;
  std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
    return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  Tensor total_weight_value;
  optional<int64_t> total_weight_bdim;
  std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::smooth_l1_loss::call(self, target, reduction, beta);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::huber_loss::call(self, target, reduction, delta);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::soft_margin_loss::call(self, target, reduction);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::elu::call(self, alpha, scale, input_scale);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) {
    return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_or_result_value;
  optional<int64_t> self_or_result_bdim;
  std::tie(self_or_result_value, self_or_result_bdim) = unwrapTensorAtLevel(self_or_result, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::elu_::call(self, alpha, scale, input_scale);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, alpha, scale, input_scale);
  return self;
}
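// In-place single-Tensor ops (elu_ above, hardsigmoid_ below) use the
// "gen_vmap_inplace_plumbing" template: the batch rule mutates the unwrapped
// self_value, and the wrapper returns the original (still batched) `self`.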
  23204. template <typename batch_rule_t, batch_rule_t batch_rule>
  23205. at::Tensor glu_generated_plumbing(const at::Tensor & self, int64_t dim) {
  23206. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  23207. auto maybe_layer = maybeCurrentDynamicLayer();
  23208. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  23209. int64_t cur_level = maybe_layer->layerId();
  23210. if (!isBatchedAtLevel(self, cur_level)) {
  23211. return at::_ops::glu::call(self, dim);
  23212. }
  23213. Tensor self_value;
  23214. optional<int64_t> self_bdim;
  23215. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  23216. auto results = batch_rule(self_value, self_bdim, dim);
  23217. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  23218. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::glu_backward::call(grad_output, self, dim);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
    return at::_ops::glu_jvp::call(glu, x, dx, dim);
  }
  Tensor glu_value;
  optional<int64_t> glu_bdim;
  std::tie(glu_value, glu_bdim) = unwrapTensorAtLevel(glu, cur_level);
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor dx_value;
  optional<int64_t> dx_bdim;
  std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
  auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
    return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
  }
  Tensor grad_x_value;
  optional<int64_t> grad_x_bdim;
  std::tie(grad_x_value, grad_x_bdim) = unwrapTensorAtLevel(grad_x, cur_level);
  Tensor grad_glu_value;
  optional<int64_t> grad_glu_bdim;
  std::tie(grad_glu_value, grad_glu_bdim) = unwrapTensorAtLevel(grad_glu, cur_level);
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor dgrad_glu_value;
  optional<int64_t> dgrad_glu_bdim;
  std::tie(dgrad_glu_value, dgrad_glu_bdim) = unwrapTensorAtLevel(dgrad_glu, cur_level);
  Tensor dx_value;
  optional<int64_t> dx_bdim;
  std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
  auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardsigmoid::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardsigmoid_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardsigmoid_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardtanh::call(self, min_val, max_val);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min_val, max_val);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardtanh_::call(self, min_val, max_val);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min_val, max_val);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardswish_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardswish::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & hardswish__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardswish_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardswish_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::leaky_relu::call(self, negative_slope);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, negative_slope);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::leaky_relu_::call(self, negative_slope);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, negative_slope);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_sigmoid::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_sigmoid_forward::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(buffer, cur_level)) {
    return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor buffer_value;
  optional<int64_t> buffer_bdim;
  std::tie(buffer_value, buffer_bdim) = unwrapTensorAtLevel(buffer, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rrelu_with_noise_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
    return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor noise_value;
  optional<int64_t> noise_bdim;
  std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
  auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
    return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor noise_value;
  optional<int64_t> noise_bdim;
  std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rrelu_with_noise__generated_plumbing(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
    return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor noise_value;
  optional<int64_t> noise_bdim;
  std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
  batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softplus::call(self, beta, threshold);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, beta, threshold);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softshrink::call(self, lambd);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softshrink_backward::call(grad_output, self, lambd);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_avg_pool2d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_avg_pool3d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_max_pool2d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_max_pool3d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
    return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor random_samples_value;
  optional<int64_t> random_samples_bdim;
  std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
    return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor random_samples_value;
  optional<int64_t> random_samples_bdim;
  std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  24068. template <typename batch_rule_t, batch_rule_t batch_rule>
  24069. at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
  24070. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  24071. auto maybe_layer = maybeCurrentDynamicLayer();
  24072. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  24073. int64_t cur_level = maybe_layer->layerId();
  24074. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
  24075. return at::_ops::max_unpool2d::call(self, indices, output_size);
  24076. }
  24077. Tensor self_value;
  24078. optional<int64_t> self_bdim;
  24079. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  24080. Tensor indices_value;
  24081. optional<int64_t> indices_bdim;
  24082. std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  24083. auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size);
  24084. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  24085. }
  24086. template <typename batch_rule_t, batch_rule_t batch_rule>
  24087. at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
  24088. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  24089. auto maybe_layer = maybeCurrentDynamicLayer();
  24090. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  24091. int64_t cur_level = maybe_layer->layerId();
  24092. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
  24093. return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
  24094. }
  24095. Tensor self_value;
  24096. optional<int64_t> self_bdim;
  24097. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  24098. Tensor indices_value;
  24099. optional<int64_t> indices_bdim;
  24100. std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  24101. auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding);
  24102. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  24103. }
  24104. template <typename batch_rule_t, batch_rule_t batch_rule>
  24105. at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  24106. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  24107. auto maybe_layer = maybeCurrentDynamicLayer();
  24108. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  24109. int64_t cur_level = maybe_layer->layerId();
  24110. if (!isBatchedAtLevel(self, cur_level)) {
  24111. return at::_ops::reflection_pad1d::call(self, padding);
  24112. }
  24113. Tensor self_value;
  24114. optional<int64_t> self_bdim;
  24115. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  24116. auto results = batch_rule(self_value, self_bdim, padding);
  24117. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  24118. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reflection_pad2d::call(self, padding);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reflection_pad3d::call(self, padding);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad1d::call(self, padding);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad2d::call(self, padding);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad3d::call(self, padding);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_pad_circular::call(self, pad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_pad_enum::call(self, pad, mode, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, mode, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pad::call(self, pad, mode, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, mode, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_nearest1d::call(self, output_size, scales);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::sigmoid_backward::call(grad_output, output);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
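// Illustrative sketch of a pointwise batch rule that could back the plumbing
// above. It assumes the moveBatchDimToFront helper from functorch's
// BatchRulesHelper.h and that grad_output and output share the same logical
// shape (the usual autograd invocation); the real binary-pointwise rules also
// pad logical ranks before broadcasting. Not the canonical implementation.
static std::tuple<at::Tensor, c10::optional<int64_t>>
sigmoid_backward_batch_rule_sketch(const at::Tensor & grad_output,
                                   c10::optional<int64_t> grad_output_bdim,
                                   const at::Tensor & output,
                                   c10::optional<int64_t> output_bdim) {
  // Operands that carry a batch dim get it moved to dim 0; unbatched operands
  // are left alone and broadcast against the leading batch dimension.
  auto grad_ = moveBatchDimToFront(grad_output, grad_output_bdim);
  auto out_ = moveBatchDimToFront(output, output_bdim);
  auto result = at::_ops::sigmoid_backward::call(grad_, out_);
  return std::make_tuple(std::move(result), 0);
}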
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logit_backward::call(grad_output, self, eps);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::tanh_backward::call(grad_output, output);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
  24964. template <typename batch_rule_t, batch_rule_t batch_rule>
  24965. at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
  24966. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  24967. auto maybe_layer = maybeCurrentDynamicLayer();
  24968. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  24969. int64_t cur_level = maybe_layer->layerId();
  24970. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  24971. return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
  24972. }
  24973. Tensor self_value;
  24974. optional<int64_t> self_bdim;
  24975. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  24976. Tensor weight_value;
  24977. optional<int64_t> weight_bdim;
  24978. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  24979. optional<Tensor> bias_value;
  24980. optional<int64_t> bias_bdim;
  24981. if (bias) {
  24982. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  24983. }
  24984. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
  24985. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  24986. }
  24987. template <typename batch_rule_t, batch_rule_t batch_rule>
  24988. at::Tensor thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
  24989. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  24990. auto maybe_layer = maybeCurrentDynamicLayer();
  24991. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  24992. int64_t cur_level = maybe_layer->layerId();
  24993. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  24994. return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
  24995. }
  24996. Tensor self_value;
  24997. optional<int64_t> self_bdim;
  24998. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  24999. Tensor weight_value;
  25000. optional<int64_t> weight_bdim;
  25001. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25002. optional<Tensor> bias_value;
  25003. optional<int64_t> bias_bdim;
  25004. if (bias) {
  25005. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25006. }
  25007. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
  25008. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25009. }
  25010. template <typename batch_rule_t, batch_rule_t batch_rule>
  25011. at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
  25012. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25013. auto maybe_layer = maybeCurrentDynamicLayer();
  25014. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25015. int64_t cur_level = maybe_layer->layerId();
  25016. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25017. return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
  25018. }
  25019. Tensor self_value;
  25020. optional<int64_t> self_bdim;
  25021. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25022. Tensor weight_value;
  25023. optional<int64_t> weight_bdim;
  25024. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25025. optional<Tensor> bias_value;
  25026. optional<int64_t> bias_bdim;
  25027. if (bias) {
  25028. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25029. }
  25030. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
  25031. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25032. }
  25033. template <typename batch_rule_t, batch_rule_t batch_rule>
  25034. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
  25035. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25036. auto maybe_layer = maybeCurrentDynamicLayer();
  25037. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25038. int64_t cur_level = maybe_layer->layerId();
  25039. if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
  25040. return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
  25041. }
  25042. Tensor grad_output_value;
  25043. optional<int64_t> grad_output_bdim;
  25044. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  25045. Tensor self_value;
  25046. optional<int64_t> self_bdim;
  25047. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25048. Tensor weight_value;
  25049. optional<int64_t> weight_bdim;
  25050. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25051. auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask);
  25052. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  25053. }
  25054. template <typename batch_rule_t, batch_rule_t batch_rule>
  25055. at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
  25056. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25057. auto maybe_layer = maybeCurrentDynamicLayer();
  25058. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25059. int64_t cur_level = maybe_layer->layerId();
  25060. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25061. return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
  25062. }
  25063. Tensor self_value;
  25064. optional<int64_t> self_bdim;
  25065. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25066. Tensor weight_value;
  25067. optional<int64_t> weight_bdim;
  25068. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25069. optional<Tensor> bias_value;
  25070. optional<int64_t> bias_bdim;
  25071. if (bias) {
  25072. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25073. }
  25074. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
  25075. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25076. }
  25077. template <typename batch_rule_t, batch_rule_t batch_rule>
  25078. at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
  25079. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25080. auto maybe_layer = maybeCurrentDynamicLayer();
  25081. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25082. int64_t cur_level = maybe_layer->layerId();
  25083. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25084. return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
  25085. }
  25086. Tensor self_value;
  25087. optional<int64_t> self_bdim;
  25088. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25089. Tensor weight_value;
  25090. optional<int64_t> weight_bdim;
  25091. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25092. optional<Tensor> bias_value;
  25093. optional<int64_t> bias_bdim;
  25094. if (bias) {
  25095. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25096. }
  25097. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
  25098. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25099. }
  25100. template <typename batch_rule_t, batch_rule_t batch_rule>
  25101. at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
  25102. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25103. auto maybe_layer = maybeCurrentDynamicLayer();
  25104. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25105. int64_t cur_level = maybe_layer->layerId();
  25106. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25107. return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
  25108. }
  25109. Tensor self_value;
  25110. optional<int64_t> self_bdim;
  25111. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25112. Tensor weight_value;
  25113. optional<int64_t> weight_bdim;
  25114. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25115. optional<Tensor> bias_value;
  25116. optional<int64_t> bias_bdim;
  25117. if (bias) {
  25118. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25119. }
  25120. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
  25121. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25122. }
  25123. template <typename batch_rule_t, batch_rule_t batch_rule>
  25124. at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
  25125. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25126. auto maybe_layer = maybeCurrentDynamicLayer();
  25127. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25128. int64_t cur_level = maybe_layer->layerId();
  25129. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25130. return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
  25131. }
  25132. Tensor self_value;
  25133. optional<int64_t> self_bdim;
  25134. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25135. Tensor weight_value;
  25136. optional<int64_t> weight_bdim;
  25137. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25138. optional<Tensor> bias_value;
  25139. optional<int64_t> bias_bdim;
  25140. if (bias) {
  25141. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25142. }
  25143. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
  25144. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25145. }
  25146. template <typename batch_rule_t, batch_rule_t batch_rule>
  25147. at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
  25148. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25149. auto maybe_layer = maybeCurrentDynamicLayer();
  25150. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25151. int64_t cur_level = maybe_layer->layerId();
  25152. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25153. return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
  25154. }
  25155. Tensor self_value;
  25156. optional<int64_t> self_bdim;
  25157. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25158. Tensor weight_value;
  25159. optional<int64_t> weight_bdim;
  25160. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25161. optional<Tensor> bias_value;
  25162. optional<int64_t> bias_bdim;
  25163. if (bias) {
  25164. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25165. }
  25166. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
  25167. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25168. }
  25169. template <typename batch_rule_t, batch_rule_t batch_rule>
  25170. at::Tensor slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
  25171. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25172. auto maybe_layer = maybeCurrentDynamicLayer();
  25173. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25174. int64_t cur_level = maybe_layer->layerId();
  25175. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
  25176. return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
  25177. }
  25178. Tensor self_value;
  25179. optional<int64_t> self_bdim;
  25180. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25181. Tensor weight_value;
  25182. optional<int64_t> weight_bdim;
  25183. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  25184. optional<Tensor> bias_value;
  25185. optional<int64_t> bias_bdim;
  25186. if (bias) {
  25187. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  25188. }
  25189. auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
  25190. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25191. }
  25192. template <typename batch_rule_t, batch_rule_t batch_rule>
  25193. at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
  25194. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25195. auto maybe_layer = maybeCurrentDynamicLayer();
  25196. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25197. int64_t cur_level = maybe_layer->layerId();
  25198. if (!isBatchedAtLevel(self, cur_level)) {
  25199. return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
  25200. }
  25201. Tensor self_value;
  25202. optional<int64_t> self_bdim;
  25203. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25204. auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride);
  25205. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25206. }
  25207. template <typename batch_rule_t, batch_rule_t batch_rule>
  25208. at::Tensor column_stack_generated_plumbing(at::TensorList tensors) {
  25209. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25210. auto maybe_layer = maybeCurrentDynamicLayer();
  25211. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25212. int64_t cur_level = maybe_layer->layerId();
  25213. if (!isBatchedAtLevel(tensors, cur_level)) {
  25214. return at::_ops::column_stack::call(tensors);
  25215. }
  25216. auto results = batch_rule(tensors);
  25217. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25218. }
  25219. template <typename batch_rule_t, batch_rule_t batch_rule>
  25220. at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
  25221. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25222. auto maybe_layer = maybeCurrentDynamicLayer();
  25223. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25224. int64_t cur_level = maybe_layer->layerId();
  25225. if (!isBatchedAtLevel(self, cur_level)) {
  25226. return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
  25227. }
  25228. Tensor self_value;
  25229. optional<int64_t> self_bdim;
  25230. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25231. auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride);
  25232. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25233. }
  25234. template <typename batch_rule_t, batch_rule_t batch_rule>
  25235. at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
  25236. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25237. auto maybe_layer = maybeCurrentDynamicLayer();
  25238. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25239. int64_t cur_level = maybe_layer->layerId();
  25240. if (!isBatchedAtLevel(self, cur_level)) {
  25241. return at::_ops::isfinite::call(self);
  25242. }
  25243. Tensor self_value;
  25244. optional<int64_t> self_bdim;
  25245. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25246. auto results = batch_rule(self_value, self_bdim);
  25247. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25248. }
  25249. template <typename batch_rule_t, batch_rule_t batch_rule>
  25250. at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
  25251. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25252. auto maybe_layer = maybeCurrentDynamicLayer();
  25253. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25254. int64_t cur_level = maybe_layer->layerId();
  25255. if (!isBatchedAtLevel(self, cur_level)) {
  25256. return at::_ops::isinf::call(self);
  25257. }
  25258. Tensor self_value;
  25259. optional<int64_t> self_bdim;
  25260. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25261. auto results = batch_rule(self_value, self_bdim);
  25262. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25263. }
  25264. template <typename batch_rule_t, batch_rule_t batch_rule>
  25265. void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
  25266. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25267. auto maybe_layer = maybeCurrentDynamicLayer();
  25268. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  25269. int64_t cur_level = maybe_layer->layerId();
  25270. if (!isBatchedAtLevel(self, cur_level)) {
  25271. return at::_ops::record_stream::call(self, s);
  25272. }
  25273. Tensor self_value;
  25274. optional<int64_t> self_bdim;
  25275. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25276. batch_rule(self_value, self_bdim, s);
  25277. }
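// NOTE: Ops with no return value, such as record_stream above, follow the same pattern
// but use the "gen_vmap_plumbing_no_returns" escape check and stop after calling the
// batch rule, since there is no result to re-wrap with makeBatched.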
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::isposinf::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::isneginf::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_add_batch_dim::call(self, batch_dim, level);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, batch_dim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_entr::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_ndtri::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_log_ndtr::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_expm1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_exp2::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_psi_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_psi::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_digamma::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_gammaln::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erf::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erfc::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erfcx::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_erfinv::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_ndtr::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_xlog1py::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_xlog1py_self_scalar::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_xlog1py_other_scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
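// NOTE: For the Scalar overloads above (e.g. special_xlog1py.self_scalar and
// special_xlog1py.other_scalar), only the Tensor argument is unwrapped at the current
// level; the Scalar argument is forwarded to the batch rule unchanged.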
  25581. template <typename batch_rule_t, batch_rule_t batch_rule>
  25582. at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  25583. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25584. auto maybe_layer = maybeCurrentDynamicLayer();
  25585. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25586. int64_t cur_level = maybe_layer->layerId();
  25587. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  25588. return at::_ops::special_xlogy::call(self, other);
  25589. }
  25590. Tensor self_value;
  25591. optional<int64_t> self_bdim;
  25592. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25593. Tensor other_value;
  25594. optional<int64_t> other_bdim;
  25595. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  25596. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  25597. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25598. }
  25599. template <typename batch_rule_t, batch_rule_t batch_rule>
  25600. at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  25601. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25602. auto maybe_layer = maybeCurrentDynamicLayer();
  25603. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25604. int64_t cur_level = maybe_layer->layerId();
  25605. if (!isBatchedAtLevel(other, cur_level)) {
  25606. return at::_ops::special_xlogy_self_scalar::call(self, other);
  25607. }
  25608. Tensor other_value;
  25609. optional<int64_t> other_bdim;
  25610. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  25611. auto results = batch_rule(self, other_value, other_bdim);
  25612. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25613. }
  25614. template <typename batch_rule_t, batch_rule_t batch_rule>
  25615. at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  25616. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25617. auto maybe_layer = maybeCurrentDynamicLayer();
  25618. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25619. int64_t cur_level = maybe_layer->layerId();
  25620. if (!isBatchedAtLevel(self, cur_level)) {
  25621. return at::_ops::special_xlogy_other_scalar::call(self, other);
  25622. }
  25623. Tensor self_value;
  25624. optional<int64_t> self_bdim;
  25625. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25626. auto results = batch_rule(self_value, self_bdim, other);
  25627. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25628. }
  25629. template <typename batch_rule_t, batch_rule_t batch_rule>
  25630. at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  25631. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25632. auto maybe_layer = maybeCurrentDynamicLayer();
  25633. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25634. int64_t cur_level = maybe_layer->layerId();
  25635. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  25636. return at::_ops::special_zeta::call(self, other);
  25637. }
  25638. Tensor self_value;
  25639. optional<int64_t> self_bdim;
  25640. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25641. Tensor other_value;
  25642. optional<int64_t> other_bdim;
  25643. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  25644. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  25645. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25646. }
  25647. template <typename batch_rule_t, batch_rule_t batch_rule>
  25648. at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  25649. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25650. auto maybe_layer = maybeCurrentDynamicLayer();
  25651. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25652. int64_t cur_level = maybe_layer->layerId();
  25653. if (!isBatchedAtLevel(other, cur_level)) {
  25654. return at::_ops::special_zeta_self_scalar::call(self, other);
  25655. }
  25656. Tensor other_value;
  25657. optional<int64_t> other_bdim;
  25658. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  25659. auto results = batch_rule(self, other_value, other_bdim);
  25660. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25661. }
  25662. template <typename batch_rule_t, batch_rule_t batch_rule>
  25663. at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  25664. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25665. auto maybe_layer = maybeCurrentDynamicLayer();
  25666. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25667. int64_t cur_level = maybe_layer->layerId();
  25668. if (!isBatchedAtLevel(self, cur_level)) {
  25669. return at::_ops::special_zeta_other_scalar::call(self, other);
  25670. }
  25671. Tensor self_value;
  25672. optional<int64_t> self_bdim;
  25673. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25674. auto results = batch_rule(self_value, self_bdim, other);
  25675. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25676. }
  25677. template <typename batch_rule_t, batch_rule_t batch_rule>
  25678. at::Tensor special_i0_generated_plumbing(const at::Tensor & self) {
  25679. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25680. auto maybe_layer = maybeCurrentDynamicLayer();
  25681. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25682. int64_t cur_level = maybe_layer->layerId();
  25683. if (!isBatchedAtLevel(self, cur_level)) {
  25684. return at::_ops::special_i0::call(self);
  25685. }
  25686. Tensor self_value;
  25687. optional<int64_t> self_bdim;
  25688. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25689. auto results = batch_rule(self_value, self_bdim);
  25690. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25691. }
  25692. template <typename batch_rule_t, batch_rule_t batch_rule>
  25693. at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) {
  25694. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25695. auto maybe_layer = maybeCurrentDynamicLayer();
  25696. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25697. int64_t cur_level = maybe_layer->layerId();
  25698. if (!isBatchedAtLevel(self, cur_level)) {
  25699. return at::_ops::special_i0e::call(self);
  25700. }
  25701. Tensor self_value;
  25702. optional<int64_t> self_bdim;
  25703. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25704. auto results = batch_rule(self_value, self_bdim);
  25705. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25706. }
  25707. template <typename batch_rule_t, batch_rule_t batch_rule>
  25708. at::Tensor special_i1_generated_plumbing(const at::Tensor & self) {
  25709. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25710. auto maybe_layer = maybeCurrentDynamicLayer();
  25711. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25712. int64_t cur_level = maybe_layer->layerId();
  25713. if (!isBatchedAtLevel(self, cur_level)) {
  25714. return at::_ops::special_i1::call(self);
  25715. }
  25716. Tensor self_value;
  25717. optional<int64_t> self_bdim;
  25718. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25719. auto results = batch_rule(self_value, self_bdim);
  25720. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25721. }
  25722. template <typename batch_rule_t, batch_rule_t batch_rule>
  25723. at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) {
  25724. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25725. auto maybe_layer = maybeCurrentDynamicLayer();
  25726. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25727. int64_t cur_level = maybe_layer->layerId();
  25728. if (!isBatchedAtLevel(self, cur_level)) {
  25729. return at::_ops::special_i1e::call(self);
  25730. }
  25731. Tensor self_value;
  25732. optional<int64_t> self_bdim;
  25733. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25734. auto results = batch_rule(self_value, self_bdim);
  25735. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25736. }
  25737. template <typename batch_rule_t, batch_rule_t batch_rule>
  25738. at::Tensor special_logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
  25739. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25740. auto maybe_layer = maybeCurrentDynamicLayer();
  25741. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25742. int64_t cur_level = maybe_layer->layerId();
  25743. if (!isBatchedAtLevel(self, cur_level)) {
  25744. return at::_ops::special_logit::call(self, eps);
  25745. }
  25746. Tensor self_value;
  25747. optional<int64_t> self_bdim;
  25748. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25749. auto results = batch_rule(self_value, self_bdim, eps);
  25750. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25751. }
  25752. template <typename batch_rule_t, batch_rule_t batch_rule>
  25753. at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
  25754. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25755. auto maybe_layer = maybeCurrentDynamicLayer();
  25756. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25757. int64_t cur_level = maybe_layer->layerId();
  25758. if (!isBatchedAtLevel(self, cur_level)) {
  25759. return at::_ops::special_polygamma::call(n, self);
  25760. }
  25761. Tensor self_value;
  25762. optional<int64_t> self_bdim;
  25763. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25764. auto results = batch_rule(n, self_value, self_bdim);
  25765. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25766. }
  25767. template <typename batch_rule_t, batch_rule_t batch_rule>
  25768. at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
  25769. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25770. auto maybe_layer = maybeCurrentDynamicLayer();
  25771. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25772. int64_t cur_level = maybe_layer->layerId();
  25773. if (!isBatchedAtLevel(self, cur_level)) {
  25774. return at::_ops::special_logsumexp::call(self, dim, keepdim);
  25775. }
  25776. Tensor self_value;
  25777. optional<int64_t> self_bdim;
  25778. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25779. auto results = batch_rule(self_value, self_bdim, dim, keepdim);
  25780. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25781. }
  25782. template <typename batch_rule_t, batch_rule_t batch_rule>
  25783. at::Tensor special_expit_generated_plumbing(const at::Tensor & self) {
  25784. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25785. auto maybe_layer = maybeCurrentDynamicLayer();
  25786. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25787. int64_t cur_level = maybe_layer->layerId();
  25788. if (!isBatchedAtLevel(self, cur_level)) {
  25789. return at::_ops::special_expit::call(self);
  25790. }
  25791. Tensor self_value;
  25792. optional<int64_t> self_bdim;
  25793. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25794. auto results = batch_rule(self_value, self_bdim);
  25795. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25796. }
  25797. template <typename batch_rule_t, batch_rule_t batch_rule>
  25798. at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) {
  25799. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25800. auto maybe_layer = maybeCurrentDynamicLayer();
  25801. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25802. int64_t cur_level = maybe_layer->layerId();
  25803. if (!isBatchedAtLevel(self, cur_level)) {
  25804. return at::_ops::special_sinc::call(self);
  25805. }
  25806. Tensor self_value;
  25807. optional<int64_t> self_bdim;
  25808. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25809. auto results = batch_rule(self_value, self_bdim);
  25810. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25811. }
  25812. template <typename batch_rule_t, batch_rule_t batch_rule>
  25813. at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) {
  25814. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25815. auto maybe_layer = maybeCurrentDynamicLayer();
  25816. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25817. int64_t cur_level = maybe_layer->layerId();
  25818. if (!isBatchedAtLevel(self, cur_level)) {
  25819. return at::_ops::special_round::call(self, decimals);
  25820. }
  25821. Tensor self_value;
  25822. optional<int64_t> self_bdim;
  25823. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25824. auto results = batch_rule(self_value, self_bdim, decimals);
  25825. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25826. }
  25827. template <typename batch_rule_t, batch_rule_t batch_rule>
  25828. at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) {
  25829. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25830. auto maybe_layer = maybeCurrentDynamicLayer();
  25831. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25832. int64_t cur_level = maybe_layer->layerId();
  25833. if (!isBatchedAtLevel(self, cur_level)) {
  25834. return at::_ops::special_log1p::call(self);
  25835. }
  25836. Tensor self_value;
  25837. optional<int64_t> self_bdim;
  25838. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25839. auto results = batch_rule(self_value, self_bdim);
  25840. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25841. }
  25842. template <typename batch_rule_t, batch_rule_t batch_rule>
  25843. at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  25844. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  25845. auto maybe_layer = maybeCurrentDynamicLayer();
  25846. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  25847. int64_t cur_level = maybe_layer->layerId();
  25848. if (!isBatchedAtLevel(self, cur_level)) {
  25849. return at::_ops::special_log_softmax::call(self, dim, dtype);
  25850. }
  25851. Tensor self_value;
  25852. optional<int64_t> self_bdim;
  25853. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  25854. auto results = batch_rule(self_value, self_bdim, dim, dtype);
  25855. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  25856. }
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_gammainc::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::special_gammaincc::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_multigammaln::call(self, p);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_softmax::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
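// The torch.fft wrappers below are all single-tensor ops and reuse the unary
// pattern above, differing only in the extra arguments (signal sizes n/s, dims,
// norm) forwarded unchanged to the batch rule.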
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_fft::call(self, n, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ifft::call(self, n, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_rfft::call(self, n, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_irfft::call(self, n, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_hfft::call(self, n, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ihfft::call(self, n, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_fft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_fft2::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ifft2::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_rfft2::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_irfft2::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_hfft2::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ihfft2::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_fftn::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ifftn::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_rfftn::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_irfftn::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_hfftn::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ihfftn::call(self, s, dim, norm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, s, dim, norm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_fftshift::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::fft_ifftshift::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
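// The linalg wrappers below include ops with multiple tensor outputs. For those,
// the batch rule returns a flat tuple that interleaves (tensor, batch-dim) pairs,
// which is why the results are re-wrapped as std::get<0>/<1>, std::get<2>/<3>, ...
// before being packed into the returned std::tuple.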
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, upper, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_cholesky::call(self, upper);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, upper);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::linalg_cross::call(self, other, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_lu_factor::call(A, pivot);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, pivot);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, pivot, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_lu::call(A, pivot);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, pivot);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
    return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
  }
  Tensor LU_value;
  optional<int64_t> LU_bdim;
  std::tie(LU_value, LU_bdim) = unwrapTensorAtLevel(LU, cur_level);
  Tensor pivots_value;
  optional<int64_t> pivots_bdim;
  std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
  Tensor B_value;
  optional<int64_t> B_bdim;
  std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
  auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_det::call(A);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_det::call(A);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor det_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::det::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, hermitian, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_ldl_factor::call(self, hermitian);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, hermitian);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
    return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
  }
  Tensor LD_value;
  optional<int64_t> LD_bdim;
  std::tie(LD_value, LD_bdim) = unwrapTensorAtLevel(LD, cur_level);
  Tensor pivots_value;
  optional<int64_t> pivots_bdim;
  std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
  Tensor B_value;
  optional<int64_t> B_bdim;
  std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
  auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) {
    return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor b_value;
  optional<int64_t> b_bdim;
  std::tie(b_value, b_bdim) = unwrapTensorAtLevel(b, cur_level);
  auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::linalg_matmul::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) {
    return at::_ops::linalg_vecdot::call(x, y, dim);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor y_value;
  optional<int64_t> y_bdim;
  std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
  auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_exp::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_slogdet::call(A);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_slogdet::call(A);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> slogdet_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::slogdet::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logdet_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logdet::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_eig_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eig::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eigvals::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, UPLO, compute_v);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eigh::call(self, UPLO);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, UPLO);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_eigvalsh::call(self, UPLO);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, UPLO);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor & input, const at::Tensor & tau) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) {
    return at::_ops::linalg_householder_product::call(input, tau);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor tau_value;
  optional<int64_t> tau_bdim;
  std::tie(tau_value, tau_bdim) = unwrapTensorAtLevel(tau, cur_level);
  auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_inv_ex::call(A, check_errors);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, check_errors);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_inv::call(A);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor inverse_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::inverse::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::inner::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::outer::call(self, vec2);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor vec2_value;
  optional<int64_t> vec2_bdim;
  std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::ger::call(self, vec2);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor vec2_value;
  optional<int64_t> vec2_bdim;
  std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_svd::call(A, full_matrices, driver);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, full_matrices, driver);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, c10::optional<c10::string_view> driver) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(A, cur_level)) {
    return at::_ops::linalg_svdvals::call(A, driver);
  }
  Tensor A_value;
  optional<int64_t> A_bdim;
  std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  auto results = batch_rule(A_value, A_bdim, driver);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::linalg_cond::call(self, p);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  26888. }
  26889. template <typename batch_rule_t, batch_rule_t batch_rule>
  26890. at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) {
  26891. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26892. auto maybe_layer = maybeCurrentDynamicLayer();
  26893. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  26894. int64_t cur_level = maybe_layer->layerId();
  26895. if (!isBatchedAtLevel(self, cur_level)) {
  26896. return at::_ops::linalg_cond_p_str::call(self, p);
  26897. }
  26898. Tensor self_value;
  26899. optional<int64_t> self_bdim;
  26900. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  26901. auto results = batch_rule(self_value, self_bdim, p);
  26902. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  26903. }
  26904. template <typename batch_rule_t, batch_rule_t batch_rule>
  26905. at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
  26906. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26907. auto maybe_layer = maybeCurrentDynamicLayer();
  26908. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  26909. int64_t cur_level = maybe_layer->layerId();
  26910. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
  26911. return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
  26912. }
  26913. Tensor self_value;
  26914. optional<int64_t> self_bdim;
  26915. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  26916. optional<Tensor> atol_value;
  26917. optional<int64_t> atol_bdim;
  26918. if (atol) {
  26919. std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
  26920. }
  26921. optional<Tensor> rtol_value;
  26922. optional<int64_t> rtol_bdim;
  26923. if (rtol) {
  26924. std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
  26925. }
  26926. auto results = batch_rule(self_value, self_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
  26927. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  26928. }
  26929. template <typename batch_rule_t, batch_rule_t batch_rule>
  26930. at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
  26931. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26932. auto maybe_layer = maybeCurrentDynamicLayer();
  26933. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  26934. int64_t cur_level = maybe_layer->layerId();
  26935. if (!isBatchedAtLevel(self, cur_level)) {
  26936. return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
  26937. }
  26938. Tensor self_value;
  26939. optional<int64_t> self_bdim;
  26940. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  26941. auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
  26942. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  26943. }
  26944. template <typename batch_rule_t, batch_rule_t batch_rule>
  26945. at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) {
  26946. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26947. auto maybe_layer = maybeCurrentDynamicLayer();
  26948. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  26949. int64_t cur_level = maybe_layer->layerId();
  26950. if (!isBatchedAtLevel(self, cur_level)) {
  26951. return at::_ops::linalg_pinv::call(self, rcond, hermitian);
  26952. }
  26953. Tensor self_value;
  26954. optional<int64_t> self_bdim;
  26955. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  26956. auto results = batch_rule(self_value, self_bdim, rcond, hermitian);
  26957. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  26958. }
  26959. template <typename batch_rule_t, batch_rule_t batch_rule>
  26960. at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
  26961. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26962. auto maybe_layer = maybeCurrentDynamicLayer();
  26963. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  26964. int64_t cur_level = maybe_layer->layerId();
  26965. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) {
  26966. return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
  26967. }
  26968. Tensor self_value;
  26969. optional<int64_t> self_bdim;
  26970. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  26971. Tensor rcond_value;
  26972. optional<int64_t> rcond_bdim;
  26973. std::tie(rcond_value, rcond_bdim) = unwrapTensorAtLevel(rcond, cur_level);
  26974. auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian);
  26975. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  26976. }
  26977. template <typename batch_rule_t, batch_rule_t batch_rule>
  26978. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
  26979. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26980. auto maybe_layer = maybeCurrentDynamicLayer();
  26981. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  26982. int64_t cur_level = maybe_layer->layerId();
  26983. if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
  26984. return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
  26985. }
  26986. Tensor A_value;
  26987. optional<int64_t> A_bdim;
  26988. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  26989. Tensor B_value;
  26990. optional<int64_t> B_bdim;
  26991. std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
  26992. auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
  26993. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
  26994. }
  26995. template <typename batch_rule_t, batch_rule_t batch_rule>
  26996. ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
  26997. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  26998. auto maybe_layer = maybeCurrentDynamicLayer();
  26999. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27000. int64_t cur_level = maybe_layer->layerId();
  27001. if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
  27002. return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
  27003. }
  27004. Tensor A_value;
  27005. optional<int64_t> A_bdim;
  27006. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  27007. Tensor B_value;
  27008. optional<int64_t> B_bdim;
  27009. std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
  27010. auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
  27011. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  27012. }
  27013. template <typename batch_rule_t, batch_rule_t batch_rule>
  27014. at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
  27015. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27016. auto maybe_layer = maybeCurrentDynamicLayer();
  27017. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27018. int64_t cur_level = maybe_layer->layerId();
  27019. if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
  27020. return at::_ops::linalg_solve::call(A, B, left);
  27021. }
  27022. Tensor A_value;
  27023. optional<int64_t> A_bdim;
  27024. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  27025. Tensor B_value;
  27026. optional<int64_t> B_bdim;
  27027. std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
  27028. auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
  27029. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27030. }
  27031. template <typename batch_rule_t, batch_rule_t batch_rule>
  27032. at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) {
  27033. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27034. auto maybe_layer = maybeCurrentDynamicLayer();
  27035. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27036. int64_t cur_level = maybe_layer->layerId();
  27037. if (!isBatchedAtLevel(self, cur_level)) {
  27038. return at::_ops::linalg_tensorinv::call(self, ind);
  27039. }
  27040. Tensor self_value;
  27041. optional<int64_t> self_bdim;
  27042. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27043. auto results = batch_rule(self_value, self_bdim, ind);
  27044. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27045. }
  27046. template <typename batch_rule_t, batch_rule_t batch_rule>
  27047. at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
  27048. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27049. auto maybe_layer = maybeCurrentDynamicLayer();
  27050. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27051. int64_t cur_level = maybe_layer->layerId();
  27052. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  27053. return at::_ops::linalg_tensorsolve::call(self, other, dims);
  27054. }
  27055. Tensor self_value;
  27056. optional<int64_t> self_bdim;
  27057. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27058. Tensor other_value;
  27059. optional<int64_t> other_bdim;
  27060. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  27061. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims);
  27062. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27063. }
  27064. template <typename batch_rule_t, batch_rule_t batch_rule>
  27065. ::std::tuple<at::Tensor,at::Tensor> linalg_qr_generated_plumbing(const at::Tensor & A, c10::string_view mode) {
  27066. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27067. auto maybe_layer = maybeCurrentDynamicLayer();
  27068. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27069. int64_t cur_level = maybe_layer->layerId();
  27070. if (!isBatchedAtLevel(A, cur_level)) {
  27071. return at::_ops::linalg_qr::call(A, mode);
  27072. }
  27073. Tensor A_value;
  27074. optional<int64_t> A_bdim;
  27075. std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
  27076. auto results = batch_rule(A_value, A_bdim, mode);
  27077. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  27078. }
  27079. template <typename batch_rule_t, batch_rule_t batch_rule>
  27080. at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
  27081. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27082. auto maybe_layer = maybeCurrentDynamicLayer();
  27083. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27084. int64_t cur_level = maybe_layer->layerId();
  27085. if (!isBatchedAtLevel(self, cur_level)) {
  27086. return at::_ops::linalg_matrix_power::call(self, n);
  27087. }
  27088. Tensor self_value;
  27089. optional<int64_t> self_bdim;
  27090. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27091. auto results = batch_rule(self_value, self_bdim, n);
  27092. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27093. }
  27094. template <typename batch_rule_t, batch_rule_t batch_rule>
  27095. at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
  27096. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27097. auto maybe_layer = maybeCurrentDynamicLayer();
  27098. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27099. int64_t cur_level = maybe_layer->layerId();
  27100. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
  27101. return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
  27102. }
  27103. Tensor input_value;
  27104. optional<int64_t> input_bdim;
  27105. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  27106. optional<Tensor> atol_value;
  27107. optional<int64_t> atol_bdim;
  27108. if (atol) {
  27109. std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
  27110. }
  27111. optional<Tensor> rtol_value;
  27112. optional<int64_t> rtol_bdim;
  27113. if (rtol) {
  27114. std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
  27115. }
  27116. auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
  27117. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27118. }
  27119. template <typename batch_rule_t, batch_rule_t batch_rule>
  27120. at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
  27121. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27122. auto maybe_layer = maybeCurrentDynamicLayer();
  27123. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27124. int64_t cur_level = maybe_layer->layerId();
  27125. if (!isBatchedAtLevel(self, cur_level)) {
  27126. return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
  27127. }
  27128. Tensor self_value;
  27129. optional<int64_t> self_bdim;
  27130. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27131. auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
  27132. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27133. }
  27134. template <typename batch_rule_t, batch_rule_t batch_rule>
  27135. at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) {
  27136. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27137. auto maybe_layer = maybeCurrentDynamicLayer();
  27138. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27139. int64_t cur_level = maybe_layer->layerId();
  27140. if (!isBatchedAtLevel(self, cur_level)) {
  27141. return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
  27142. }
  27143. Tensor self_value;
  27144. optional<int64_t> self_bdim;
  27145. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27146. auto results = batch_rule(self_value, self_bdim, tol, hermitian);
  27147. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27148. }
  27149. template <typename batch_rule_t, batch_rule_t batch_rule>
  27150. at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
  27151. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27152. auto maybe_layer = maybeCurrentDynamicLayer();
  27153. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27154. int64_t cur_level = maybe_layer->layerId();
  27155. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) {
  27156. return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
  27157. }
  27158. Tensor input_value;
  27159. optional<int64_t> input_bdim;
  27160. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  27161. Tensor tol_value;
  27162. optional<int64_t> tol_bdim;
  27163. std::tie(tol_value, tol_bdim) = unwrapTensorAtLevel(tol, cur_level);
  27164. auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian);
  27165. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27166. }
  27167. template <typename batch_rule_t, batch_rule_t batch_rule>
  27168. at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) {
  27169. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27170. auto maybe_layer = maybeCurrentDynamicLayer();
  27171. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27172. int64_t cur_level = maybe_layer->layerId();
  27173. if (!isBatchedAtLevel(tensors, cur_level)) {
  27174. return at::_ops::linalg_multi_dot::call(tensors);
  27175. }
  27176. auto results = batch_rule(tensors);
  27177. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27178. }
  27179. template <typename batch_rule_t, batch_rule_t batch_rule>
  27180. at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
  27181. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27182. auto maybe_layer = maybeCurrentDynamicLayer();
  27183. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27184. int64_t cur_level = maybe_layer->layerId();
  27185. if (!isBatchedAtLevel(self, cur_level)) {
  27186. return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
  27187. }
  27188. Tensor self_value;
  27189. optional<int64_t> self_bdim;
  27190. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27191. auto results = batch_rule(self_value, self_bdim, padding, output_size);
  27192. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27193. }
  27194. template <typename batch_rule_t, batch_rule_t batch_rule>
  27195. at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  27196. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27197. auto maybe_layer = maybeCurrentDynamicLayer();
  27198. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27199. int64_t cur_level = maybe_layer->layerId();
  27200. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
  27201. return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
  27202. }
  27203. Tensor self_value;
  27204. optional<int64_t> self_bdim;
  27205. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27206. Tensor other_value;
  27207. optional<int64_t> other_bdim;
  27208. std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  27209. auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  27210. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27211. }
  27212. template <typename batch_rule_t, batch_rule_t batch_rule>
  27213. at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
  27214. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27215. auto maybe_layer = maybeCurrentDynamicLayer();
  27216. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27217. int64_t cur_level = maybe_layer->layerId();
  27218. if (!isBatchedAtLevel(values, cur_level)) {
  27219. return at::_ops::_test_optional_intlist::call(values, addends);
  27220. }
  27221. Tensor values_value;
  27222. optional<int64_t> values_bdim;
  27223. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  27224. auto results = batch_rule(values_value, values_bdim, addends);
  27225. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27226. }
  27227. template <typename batch_rule_t, batch_rule_t batch_rule>
  27228. at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
  27229. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27230. auto maybe_layer = maybeCurrentDynamicLayer();
  27231. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27232. int64_t cur_level = maybe_layer->layerId();
  27233. if (!isBatchedAtLevel(values, cur_level)) {
  27234. return at::_ops::_test_optional_filled_intlist::call(values, addends);
  27235. }
  27236. Tensor values_value;
  27237. optional<int64_t> values_bdim;
  27238. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  27239. auto results = batch_rule(values_value, values_bdim, addends);
  27240. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27241. }
  27242. template <typename batch_rule_t, batch_rule_t batch_rule>
  27243. at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
  27244. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27245. auto maybe_layer = maybeCurrentDynamicLayer();
  27246. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27247. int64_t cur_level = maybe_layer->layerId();
  27248. if (!isBatchedAtLevel(values, cur_level)) {
  27249. return at::_ops::_test_optional_floatlist::call(values, addends);
  27250. }
  27251. Tensor values_value;
  27252. optional<int64_t> values_bdim;
  27253. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  27254. auto results = batch_rule(values_value, values_bdim, addends);
  27255. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27256. }
  27257. template <typename batch_rule_t, batch_rule_t batch_rule>
  27258. at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
  27259. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27260. auto maybe_layer = maybeCurrentDynamicLayer();
  27261. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27262. int64_t cur_level = maybe_layer->layerId();
  27263. if (!isBatchedAtLevel(dummy, cur_level)) {
  27264. return at::_ops::_test_string_default::call(dummy, a, b);
  27265. }
  27266. Tensor dummy_value;
  27267. optional<int64_t> dummy_bdim;
  27268. std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
  27269. auto results = batch_rule(dummy_value, dummy_bdim, a, b);
  27270. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27271. }
  27272. template <typename batch_rule_t, batch_rule_t batch_rule>
  27273. at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) {
  27274. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27275. auto maybe_layer = maybeCurrentDynamicLayer();
  27276. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27277. int64_t cur_level = maybe_layer->layerId();
  27278. if (!isBatchedAtLevel(dummy, cur_level)) {
  27279. return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
  27280. }
  27281. Tensor dummy_value;
  27282. optional<int64_t> dummy_bdim;
  27283. std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
  27284. auto results = batch_rule(dummy_value, dummy_bdim, a, b);
  27285. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27286. }
  27287. template <typename batch_rule_t, batch_rule_t batch_rule>
  27288. at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) {
  27289. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27290. auto maybe_layer = maybeCurrentDynamicLayer();
  27291. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27292. int64_t cur_level = maybe_layer->layerId();
  27293. if (!isBatchedAtLevel(dummy, cur_level)) {
  27294. return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
  27295. }
  27296. Tensor dummy_value;
  27297. optional<int64_t> dummy_bdim;
  27298. std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
  27299. auto results = batch_rule(dummy_value, dummy_bdim, a, b);
  27300. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27301. }
  27302. template <typename batch_rule_t, batch_rule_t batch_rule>
  27303. at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) {
  27304. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27305. auto maybe_layer = maybeCurrentDynamicLayer();
  27306. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27307. int64_t cur_level = maybe_layer->layerId();
  27308. if (!isBatchedAtLevel(self, cur_level)) {
  27309. return at::_ops::_test_warn_in_autograd::call(self);
  27310. }
  27311. Tensor self_value;
  27312. optional<int64_t> self_bdim;
  27313. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27314. auto results = batch_rule(self_value, self_bdim);
  27315. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27316. }
  27317. template <typename batch_rule_t, batch_rule_t batch_rule>
  27318. at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) {
  27319. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27320. auto maybe_layer = maybeCurrentDynamicLayer();
  27321. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27322. int64_t cur_level = maybe_layer->layerId();
  27323. if (!isBatchedAtLevel(self, cur_level)) {
  27324. return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
  27325. }
  27326. Tensor self_value;
  27327. optional<int64_t> self_bdim;
  27328. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27329. auto results = batch_rule(self_value, self_bdim);
  27330. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27331. }
  27332. template <typename batch_rule_t, batch_rule_t batch_rule>
  27333. at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) {
  27334. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27335. auto maybe_layer = maybeCurrentDynamicLayer();
  27336. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27337. int64_t cur_level = maybe_layer->layerId();
  27338. if (!isBatchedAtLevel(self, cur_level)) {
  27339. return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
  27340. }
  27341. Tensor self_value;
  27342. optional<int64_t> self_bdim;
  27343. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27344. auto results = batch_rule(self_value, self_bdim, b);
  27345. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27346. }
  27347. template <typename batch_rule_t, batch_rule_t batch_rule>
  27348. at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) {
  27349. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27350. auto maybe_layer = maybeCurrentDynamicLayer();
  27351. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27352. int64_t cur_level = maybe_layer->layerId();
  27353. if (!isBatchedAtLevel(self, cur_level)) {
  27354. return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
  27355. }
  27356. Tensor self_value;
  27357. optional<int64_t> self_bdim;
  27358. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27359. auto results = batch_rule(self_value, self_bdim);
  27360. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27361. }
  27362. template <typename batch_rule_t, batch_rule_t batch_rule>
  27363. at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) {
  27364. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27365. auto maybe_layer = maybeCurrentDynamicLayer();
  27366. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27367. int64_t cur_level = maybe_layer->layerId();
  27368. if (!isBatchedAtLevel(self, cur_level)) {
  27369. return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self);
  27370. }
  27371. Tensor self_value;
  27372. optional<int64_t> self_bdim;
  27373. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27374. auto results = batch_rule(self_value, self_bdim);
  27375. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27376. }
  27377. template <typename batch_rule_t, batch_rule_t batch_rule>
  27378. at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
  27379. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27380. auto maybe_layer = maybeCurrentDynamicLayer();
  27381. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27382. int64_t cur_level = maybe_layer->layerId();
  27383. if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
  27384. return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
  27385. }
  27386. Tensor data_value;
  27387. optional<int64_t> data_bdim;
  27388. std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  27389. optional<Tensor> lengths_value;
  27390. optional<int64_t> lengths_bdim;
  27391. if (lengths) {
  27392. std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
  27393. }
  27394. optional<Tensor> indices_value;
  27395. optional<int64_t> indices_bdim;
  27396. if (indices) {
  27397. std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level);
  27398. }
  27399. optional<Tensor> offsets_value;
  27400. optional<int64_t> offsets_bdim;
  27401. if (offsets) {
  27402. std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
  27403. }
  27404. auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial);
  27405. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27406. }
  27407. template <typename batch_rule_t, batch_rule_t batch_rule>
  27408. at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
  27409. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27410. auto maybe_layer = maybeCurrentDynamicLayer();
  27411. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27412. int64_t cur_level = maybe_layer->layerId();
  27413. if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
  27414. return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial);
  27415. }
  27416. Tensor grad_value;
  27417. optional<int64_t> grad_bdim;
  27418. std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  27419. Tensor output_value;
  27420. optional<int64_t> output_bdim;
  27421. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  27422. Tensor data_value;
  27423. optional<int64_t> data_bdim;
  27424. std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  27425. optional<Tensor> lengths_value;
  27426. optional<int64_t> lengths_bdim;
  27427. if (lengths) {
  27428. std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
  27429. }
  27430. optional<Tensor> offsets_value;
  27431. optional<int64_t> offsets_bdim;
  27432. if (offsets) {
  27433. std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
  27434. }
  27435. auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial);
  27436. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27437. }
  27438. template <typename batch_rule_t, batch_rule_t batch_rule>
  27439. at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value) {
  27440. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27441. auto maybe_layer = maybeCurrentDynamicLayer();
  27442. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27443. int64_t cur_level = maybe_layer->layerId();
  27444. if (!isBatchedAtLevel(sequences, cur_level)) {
  27445. return at::_ops::pad_sequence::call(sequences, batch_first, padding_value);
  27446. }
  27447. auto results = batch_rule(sequences, batch_first, padding_value);
  27448. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27449. }
  27450. template <typename batch_rule_t, batch_rule_t batch_rule>
  27451. at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) {
  27452. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27453. auto maybe_layer = maybeCurrentDynamicLayer();
  27454. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27455. int64_t cur_level = maybe_layer->layerId();
  27456. if (!isBatchedAtLevel(tensors, cur_level)) {
  27457. return at::_ops::flatten_dense_tensors::call(tensors);
  27458. }
  27459. auto results = batch_rule(tensors);
  27460. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27461. }
  27462. template <typename batch_rule_t, batch_rule_t batch_rule>
  27463. ::std::vector<at::Tensor> unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) {
  27464. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27465. auto maybe_layer = maybeCurrentDynamicLayer();
  27466. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27467. int64_t cur_level = maybe_layer->layerId();
  27468. if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) {
  27469. return at::_ops::unflatten_dense_tensors::call(flat, tensors);
  27470. }
  27471. Tensor flat_value;
  27472. optional<int64_t> flat_bdim;
  27473. std::tie(flat_value, flat_bdim) = unwrapTensorAtLevel(flat, cur_level);
  27474. auto results = batch_rule(flat_value, flat_bdim, tensors);
  27475. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  27476. }
  27477. template <typename batch_rule_t, batch_rule_t batch_rule>
  27478. at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  27479. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27480. auto maybe_layer = maybeCurrentDynamicLayer();
  27481. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27482. int64_t cur_level = maybe_layer->layerId();
  27483. if (!isBatchedAtLevel(list, cur_level)) {
  27484. return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory);
  27485. }
  27486. auto results = batch_rule(list, dtype, layout, device, pin_memory);
  27487. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27488. }
  27489. template <typename batch_rule_t, batch_rule_t batch_rule>
  27490. at::Tensor _fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) {
  27491. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27492. auto maybe_layer = maybeCurrentDynamicLayer();
  27493. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27494. int64_t cur_level = maybe_layer->layerId();
  27495. if (!isBatchedAtLevel(self, cur_level)) {
  27496. return at::_ops::_fw_primal_copy::call(self, level);
  27497. }
  27498. Tensor self_value;
  27499. optional<int64_t> self_bdim;
  27500. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27501. auto results = batch_rule(self_value, self_bdim, level);
  27502. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27503. }
  27504. template <typename batch_rule_t, batch_rule_t batch_rule>
  27505. at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  27506. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27507. auto maybe_layer = maybeCurrentDynamicLayer();
  27508. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27509. int64_t cur_level = maybe_layer->layerId();
  27510. if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
  27511. return at::_ops::_make_dual_copy::call(primal, tangent, level);
  27512. }
  27513. Tensor primal_value;
  27514. optional<int64_t> primal_bdim;
  27515. std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
  27516. Tensor tangent_value;
  27517. optional<int64_t> tangent_bdim;
  27518. std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
  27519. auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
  27520. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27521. }
  27522. template <typename batch_rule_t, batch_rule_t batch_rule>
  27523. at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) {
  27524. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27525. auto maybe_layer = maybeCurrentDynamicLayer();
  27526. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27527. int64_t cur_level = maybe_layer->layerId();
  27528. if (!isBatchedAtLevel(self, cur_level)) {
  27529. return at::_ops::view_as_real_copy::call(self);
  27530. }
  27531. Tensor self_value;
  27532. optional<int64_t> self_bdim;
  27533. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27534. auto results = batch_rule(self_value, self_bdim);
  27535. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27536. }
  27537. template <typename batch_rule_t, batch_rule_t batch_rule>
  27538. at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) {
  27539. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27540. auto maybe_layer = maybeCurrentDynamicLayer();
  27541. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27542. int64_t cur_level = maybe_layer->layerId();
  27543. if (!isBatchedAtLevel(self, cur_level)) {
  27544. return at::_ops::view_as_complex_copy::call(self);
  27545. }
  27546. Tensor self_value;
  27547. optional<int64_t> self_bdim;
  27548. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27549. auto results = batch_rule(self_value, self_bdim);
  27550. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27551. }
  27552. template <typename batch_rule_t, batch_rule_t batch_rule>
  27553. at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) {
  27554. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27555. auto maybe_layer = maybeCurrentDynamicLayer();
  27556. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27557. int64_t cur_level = maybe_layer->layerId();
  27558. if (!isBatchedAtLevel(self, cur_level)) {
  27559. return at::_ops::_conj_copy::call(self);
  27560. }
  27561. Tensor self_value;
  27562. optional<int64_t> self_bdim;
  27563. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27564. auto results = batch_rule(self_value, self_bdim);
  27565. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27566. }
  27567. template <typename batch_rule_t, batch_rule_t batch_rule>
  27568. at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) {
  27569. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27570. auto maybe_layer = maybeCurrentDynamicLayer();
  27571. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27572. int64_t cur_level = maybe_layer->layerId();
  27573. if (!isBatchedAtLevel(self, cur_level)) {
  27574. return at::_ops::_neg_view_copy::call(self);
  27575. }
  27576. Tensor self_value;
  27577. optional<int64_t> self_bdim;
  27578. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27579. auto results = batch_rule(self_value, self_bdim);
  27580. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27581. }
  27582. template <typename batch_rule_t, batch_rule_t batch_rule>
  27583. at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  27584. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27585. auto maybe_layer = maybeCurrentDynamicLayer();
  27586. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27587. int64_t cur_level = maybe_layer->layerId();
  27588. if (!isBatchedAtLevel(self, cur_level)) {
  27589. return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
  27590. }
  27591. Tensor self_value;
  27592. optional<int64_t> self_bdim;
  27593. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27594. auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
  27595. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27596. }
  27597. template <typename batch_rule_t, batch_rule_t batch_rule>
  27598. at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
  27599. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27600. auto maybe_layer = maybeCurrentDynamicLayer();
  27601. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27602. int64_t cur_level = maybe_layer->layerId();
  27603. if (!isBatchedAtLevel(self, cur_level)) {
  27604. return at::_ops::_sparse_broadcast_to_copy::call(self, size);
  27605. }
  27606. Tensor self_value;
  27607. optional<int64_t> self_bdim;
  27608. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27609. auto results = batch_rule(self_value, self_bdim, size);
  27610. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27611. }
  27612. template <typename batch_rule_t, batch_rule_t batch_rule>
  27613. at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  27614. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27615. auto maybe_layer = maybeCurrentDynamicLayer();
  27616. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27617. int64_t cur_level = maybe_layer->layerId();
  27618. if (!isBatchedAtLevel(self, cur_level)) {
  27619. return at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
  27620. }
  27621. Tensor self_value;
  27622. optional<int64_t> self_bdim;
  27623. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27624. auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
  27625. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27626. }
  27627. template <typename batch_rule_t, batch_rule_t batch_rule>
  27628. at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
  27629. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27630. auto maybe_layer = maybeCurrentDynamicLayer();
  27631. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27632. int64_t cur_level = maybe_layer->layerId();
  27633. if (!isBatchedAtLevel(self, cur_level)) {
  27634. return at::_ops::expand_copy::call(self, size, implicit);
  27635. }
  27636. Tensor self_value;
  27637. optional<int64_t> self_bdim;
  27638. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27639. auto results = batch_rule(self_value, self_bdim, size, implicit);
  27640. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27641. }
  27642. template <typename batch_rule_t, batch_rule_t batch_rule>
  27643. at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
  27644. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27645. auto maybe_layer = maybeCurrentDynamicLayer();
  27646. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27647. int64_t cur_level = maybe_layer->layerId();
  27648. if (!isBatchedAtLevel(self, cur_level)) {
  27649. return at::_ops::permute_copy::call(self, dims);
  27650. }
  27651. Tensor self_value;
  27652. optional<int64_t> self_bdim;
  27653. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27654. auto results = batch_rule(self_value, self_bdim, dims);
  27655. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27656. }
  27657. template <typename batch_rule_t, batch_rule_t batch_rule>
  27658. at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  27659. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27660. auto maybe_layer = maybeCurrentDynamicLayer();
  27661. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27662. int64_t cur_level = maybe_layer->layerId();
  27663. if (!isBatchedAtLevel(self, cur_level)) {
  27664. return at::_ops::_reshape_alias_copy::call(self, size, stride);
  27665. }
  27666. Tensor self_value;
  27667. optional<int64_t> self_bdim;
  27668. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27669. auto results = batch_rule(self_value, self_bdim, size, stride);
  27670. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27671. }
  27672. template <typename batch_rule_t, batch_rule_t batch_rule>
  27673. at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  27674. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27675. auto maybe_layer = maybeCurrentDynamicLayer();
  27676. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27677. int64_t cur_level = maybe_layer->layerId();
  27678. if (!isBatchedAtLevel(self, cur_level)) {
  27679. return at::_ops::select_copy_int::call(self, dim, index);
  27680. }
  27681. Tensor self_value;
  27682. optional<int64_t> self_bdim;
  27683. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27684. auto results = batch_rule(self_value, self_bdim, dim, index);
  27685. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27686. }
  27687. template <typename batch_rule_t, batch_rule_t batch_rule>
  27688. at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) {
  27689. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27690. auto maybe_layer = maybeCurrentDynamicLayer();
  27691. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27692. int64_t cur_level = maybe_layer->layerId();
  27693. if (!isBatchedAtLevel(self, cur_level)) {
  27694. return at::_ops::detach_copy::call(self);
  27695. }
  27696. Tensor self_value;
  27697. optional<int64_t> self_bdim;
  27698. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27699. auto results = batch_rule(self_value, self_bdim);
  27700. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27701. }
  27702. template <typename batch_rule_t, batch_rule_t batch_rule>
  27703. at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  27704. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27705. auto maybe_layer = maybeCurrentDynamicLayer();
  27706. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27707. int64_t cur_level = maybe_layer->layerId();
  27708. if (!isBatchedAtLevel(self, cur_level)) {
  27709. return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
  27710. }
  27711. Tensor self_value;
  27712. optional<int64_t> self_bdim;
  27713. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27714. auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
  27715. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27716. }
  27717. template <typename batch_rule_t, batch_rule_t batch_rule>
  27718. ::std::vector<at::Tensor> split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  27719. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27720. auto maybe_layer = maybeCurrentDynamicLayer();
  27721. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27722. int64_t cur_level = maybe_layer->layerId();
  27723. if (!isBatchedAtLevel(self, cur_level)) {
  27724. return at::_ops::split_copy_Tensor::call(self, split_size, dim);
  27725. }
  27726. Tensor self_value;
  27727. optional<int64_t> self_bdim;
  27728. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27729. auto results = batch_rule(self_value, self_bdim, split_size, dim);
  27730. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  27731. }
  27732. template <typename batch_rule_t, batch_rule_t batch_rule>
  27733. ::std::vector<at::Tensor> split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  27734. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27735. auto maybe_layer = maybeCurrentDynamicLayer();
  27736. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27737. int64_t cur_level = maybe_layer->layerId();
  27738. if (!isBatchedAtLevel(self, cur_level)) {
  27739. return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
  27740. }
  27741. Tensor self_value;
  27742. optional<int64_t> self_bdim;
  27743. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27744. auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  27745. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  27746. }
  27747. template <typename batch_rule_t, batch_rule_t batch_rule>
  27748. at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) {
  27749. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27750. auto maybe_layer = maybeCurrentDynamicLayer();
  27751. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27752. int64_t cur_level = maybe_layer->layerId();
  27753. if (!isBatchedAtLevel(self, cur_level)) {
  27754. return at::_ops::squeeze_copy::call(self);
  27755. }
  27756. Tensor self_value;
  27757. optional<int64_t> self_bdim;
  27758. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27759. auto results = batch_rule(self_value, self_bdim);
  27760. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27761. }
  27762. template <typename batch_rule_t, batch_rule_t batch_rule>
  27763. at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
  27764. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27765. auto maybe_layer = maybeCurrentDynamicLayer();
  27766. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27767. int64_t cur_level = maybe_layer->layerId();
  27768. if (!isBatchedAtLevel(self, cur_level)) {
  27769. return at::_ops::squeeze_copy_dim::call(self, dim);
  27770. }
  27771. Tensor self_value;
  27772. optional<int64_t> self_bdim;
  27773. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27774. auto results = batch_rule(self_value, self_bdim, dim);
  27775. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27776. }
  27777. template <typename batch_rule_t, batch_rule_t batch_rule>
  27778. at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
  27779. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27780. auto maybe_layer = maybeCurrentDynamicLayer();
  27781. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27782. int64_t cur_level = maybe_layer->layerId();
  27783. if (!isBatchedAtLevel(self, cur_level)) {
  27784. return at::_ops::squeeze_copy_dims::call(self, dim);
  27785. }
  27786. Tensor self_value;
  27787. optional<int64_t> self_bdim;
  27788. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27789. auto results = batch_rule(self_value, self_bdim, dim);
  27790. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27791. }
  27792. template <typename batch_rule_t, batch_rule_t batch_rule>
  27793. at::Tensor t_copy_generated_plumbing(const at::Tensor & self) {
  27794. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27795. auto maybe_layer = maybeCurrentDynamicLayer();
  27796. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27797. int64_t cur_level = maybe_layer->layerId();
  27798. if (!isBatchedAtLevel(self, cur_level)) {
  27799. return at::_ops::t_copy::call(self);
  27800. }
  27801. Tensor self_value;
  27802. optional<int64_t> self_bdim;
  27803. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27804. auto results = batch_rule(self_value, self_bdim);
  27805. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27806. }
  27807. template <typename batch_rule_t, batch_rule_t batch_rule>
  27808. at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
  27809. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27810. auto maybe_layer = maybeCurrentDynamicLayer();
  27811. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27812. int64_t cur_level = maybe_layer->layerId();
  27813. if (!isBatchedAtLevel(self, cur_level)) {
  27814. return at::_ops::transpose_copy_int::call(self, dim0, dim1);
  27815. }
  27816. Tensor self_value;
  27817. optional<int64_t> self_bdim;
  27818. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27819. auto results = batch_rule(self_value, self_bdim, dim0, dim1);
  27820. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27821. }
  27822. template <typename batch_rule_t, batch_rule_t batch_rule>
  27823. at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) {
  27824. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27825. auto maybe_layer = maybeCurrentDynamicLayer();
  27826. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27827. int64_t cur_level = maybe_layer->layerId();
  27828. if (!isBatchedAtLevel(self, cur_level)) {
  27829. return at::_ops::unsqueeze_copy::call(self, dim);
  27830. }
  27831. Tensor self_value;
  27832. optional<int64_t> self_bdim;
  27833. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27834. auto results = batch_rule(self_value, self_bdim, dim);
  27835. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27836. }
  27837. template <typename batch_rule_t, batch_rule_t batch_rule>
  27838. at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) {
  27839. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27840. auto maybe_layer = maybeCurrentDynamicLayer();
  27841. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27842. int64_t cur_level = maybe_layer->layerId();
  27843. if (!isBatchedAtLevel(self, cur_level)) {
  27844. return at::_ops::_indices_copy::call(self);
  27845. }
  27846. Tensor self_value;
  27847. optional<int64_t> self_bdim;
  27848. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27849. auto results = batch_rule(self_value, self_bdim);
  27850. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27851. }
  27852. template <typename batch_rule_t, batch_rule_t batch_rule>
  27853. at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) {
  27854. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27855. auto maybe_layer = maybeCurrentDynamicLayer();
  27856. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27857. int64_t cur_level = maybe_layer->layerId();
  27858. if (!isBatchedAtLevel(self, cur_level)) {
  27859. return at::_ops::_values_copy::call(self);
  27860. }
  27861. Tensor self_value;
  27862. optional<int64_t> self_bdim;
  27863. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27864. auto results = batch_rule(self_value, self_bdim);
  27865. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27866. }
  27867. template <typename batch_rule_t, batch_rule_t batch_rule>
  27868. at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) {
  27869. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27870. auto maybe_layer = maybeCurrentDynamicLayer();
  27871. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27872. int64_t cur_level = maybe_layer->layerId();
  27873. if (!isBatchedAtLevel(self, cur_level)) {
  27874. return at::_ops::indices_copy::call(self);
  27875. }
  27876. Tensor self_value;
  27877. optional<int64_t> self_bdim;
  27878. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27879. auto results = batch_rule(self_value, self_bdim);
  27880. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27881. }
  27882. template <typename batch_rule_t, batch_rule_t batch_rule>
  27883. at::Tensor values_copy_generated_plumbing(const at::Tensor & self) {
  27884. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27885. auto maybe_layer = maybeCurrentDynamicLayer();
  27886. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27887. int64_t cur_level = maybe_layer->layerId();
  27888. if (!isBatchedAtLevel(self, cur_level)) {
  27889. return at::_ops::values_copy::call(self);
  27890. }
  27891. Tensor self_value;
  27892. optional<int64_t> self_bdim;
  27893. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27894. auto results = batch_rule(self_value, self_bdim);
  27895. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27896. }
  27897. template <typename batch_rule_t, batch_rule_t batch_rule>
  27898. at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) {
  27899. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27900. auto maybe_layer = maybeCurrentDynamicLayer();
  27901. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27902. int64_t cur_level = maybe_layer->layerId();
  27903. if (!isBatchedAtLevel(self, cur_level)) {
  27904. return at::_ops::crow_indices_copy::call(self);
  27905. }
  27906. Tensor self_value;
  27907. optional<int64_t> self_bdim;
  27908. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27909. auto results = batch_rule(self_value, self_bdim);
  27910. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27911. }
  27912. template <typename batch_rule_t, batch_rule_t batch_rule>
  27913. at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) {
  27914. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27915. auto maybe_layer = maybeCurrentDynamicLayer();
  27916. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27917. int64_t cur_level = maybe_layer->layerId();
  27918. if (!isBatchedAtLevel(self, cur_level)) {
  27919. return at::_ops::col_indices_copy::call(self);
  27920. }
  27921. Tensor self_value;
  27922. optional<int64_t> self_bdim;
  27923. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27924. auto results = batch_rule(self_value, self_bdim);
  27925. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27926. }
  27927. template <typename batch_rule_t, batch_rule_t batch_rule>
  27928. at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) {
  27929. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27930. auto maybe_layer = maybeCurrentDynamicLayer();
  27931. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27932. int64_t cur_level = maybe_layer->layerId();
  27933. if (!isBatchedAtLevel(self, cur_level)) {
  27934. return at::_ops::ccol_indices_copy::call(self);
  27935. }
  27936. Tensor self_value;
  27937. optional<int64_t> self_bdim;
  27938. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27939. auto results = batch_rule(self_value, self_bdim);
  27940. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27941. }
  27942. template <typename batch_rule_t, batch_rule_t batch_rule>
  27943. at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) {
  27944. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27945. auto maybe_layer = maybeCurrentDynamicLayer();
  27946. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27947. int64_t cur_level = maybe_layer->layerId();
  27948. if (!isBatchedAtLevel(self, cur_level)) {
  27949. return at::_ops::row_indices_copy::call(self);
  27950. }
  27951. Tensor self_value;
  27952. optional<int64_t> self_bdim;
  27953. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27954. auto results = batch_rule(self_value, self_bdim);
  27955. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  27956. }
  27957. template <typename batch_rule_t, batch_rule_t batch_rule>
  27958. ::std::vector<at::Tensor> unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
  27959. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27960. auto maybe_layer = maybeCurrentDynamicLayer();
  27961. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  27962. int64_t cur_level = maybe_layer->layerId();
  27963. if (!isBatchedAtLevel(self, cur_level)) {
  27964. return at::_ops::unbind_copy_int::call(self, dim);
  27965. }
  27966. Tensor self_value;
  27967. optional<int64_t> self_bdim;
  27968. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27969. auto results = batch_rule(self_value, self_bdim, dim);
  27970. return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
  27971. }
  27972. template <typename batch_rule_t, batch_rule_t batch_rule>
  27973. void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) {
  27974. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27975. auto maybe_layer = maybeCurrentDynamicLayer();
  27976. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  27977. int64_t cur_level = maybe_layer->layerId();
  27978. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  27979. return at::_ops::unbind_copy_int_out::call(self, dim, out);
  27980. }
  27981. Tensor self_value;
  27982. optional<int64_t> self_bdim;
  27983. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27984. batch_rule(self_value, self_bdim, dim, out);
  27985. }
  27986. template <typename batch_rule_t, batch_rule_t batch_rule>
  27987. void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
  27988. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  27989. auto maybe_layer = maybeCurrentDynamicLayer();
  27990. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  27991. int64_t cur_level = maybe_layer->layerId();
  27992. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  27993. return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
  27994. }
  27995. Tensor self_value;
  27996. optional<int64_t> self_bdim;
  27997. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  27998. batch_rule(self_value, self_bdim, split_size, dim, out);
  27999. }
  28000. template <typename batch_rule_t, batch_rule_t batch_rule>
  28001. void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
  28002. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28003. auto maybe_layer = maybeCurrentDynamicLayer();
  28004. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  28005. int64_t cur_level = maybe_layer->layerId();
  28006. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  28007. return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
  28008. }
  28009. Tensor self_value;
  28010. optional<int64_t> self_bdim;
  28011. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28012. batch_rule(self_value, self_bdim, split_sizes, dim, out);
  28013. }
  28014. template <typename batch_rule_t, batch_rule_t batch_rule>
  28015. at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  28016. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28017. auto maybe_layer = maybeCurrentDynamicLayer();
  28018. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28019. int64_t cur_level = maybe_layer->layerId();
  28020. if (!isBatchedAtLevel(self, cur_level)) {
  28021. return at::_ops::view_copy::call(self, size);
  28022. }
  28023. Tensor self_value;
  28024. optional<int64_t> self_bdim;
  28025. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28026. auto results = batch_rule(self_value, self_bdim, size);
  28027. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28028. }
  28029. template <typename batch_rule_t, batch_rule_t batch_rule>
  28030. at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
  28031. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28032. auto maybe_layer = maybeCurrentDynamicLayer();
  28033. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28034. int64_t cur_level = maybe_layer->layerId();
  28035. if (!isBatchedAtLevel(self, cur_level)) {
  28036. return at::_ops::view_copy_dtype::call(self, dtype);
  28037. }
  28038. Tensor self_value;
  28039. optional<int64_t> self_bdim;
  28040. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28041. auto results = batch_rule(self_value, self_bdim, dtype);
  28042. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28043. }
  28044. template <typename batch_rule_t, batch_rule_t batch_rule>
  28045. at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  28046. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28047. auto maybe_layer = maybeCurrentDynamicLayer();
  28048. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28049. int64_t cur_level = maybe_layer->layerId();
  28050. if (!isBatchedAtLevel(self, cur_level)) {
  28051. return at::_ops::unfold_copy::call(self, dimension, size, step);
  28052. }
  28053. Tensor self_value;
  28054. optional<int64_t> self_bdim;
  28055. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28056. auto results = batch_rule(self_value, self_bdim, dimension, size, step);
  28057. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28058. }
  28059. template <typename batch_rule_t, batch_rule_t batch_rule>
  28060. at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) {
  28061. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28062. auto maybe_layer = maybeCurrentDynamicLayer();
  28063. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28064. int64_t cur_level = maybe_layer->layerId();
  28065. if (!isBatchedAtLevel(self, cur_level)) {
  28066. return at::_ops::alias_copy::call(self);
  28067. }
  28068. Tensor self_value;
  28069. optional<int64_t> self_bdim;
  28070. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28071. auto results = batch_rule(self_value, self_bdim);
  28072. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28073. }
  28074. template <typename batch_rule_t, batch_rule_t batch_rule>
  28075. at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
  28076. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28077. auto maybe_layer = maybeCurrentDynamicLayer();
  28078. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28079. int64_t cur_level = maybe_layer->layerId();
  28080. if (!isBatchedAtLevel(self, cur_level)) {
  28081. return at::_ops::to_padded_tensor::call(self, padding, output_size);
  28082. }
  28083. Tensor self_value;
  28084. optional<int64_t> self_bdim;
  28085. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28086. auto results = batch_rule(self_value, self_bdim, padding, output_size);
  28087. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28088. }
  28089. template <typename batch_rule_t, batch_rule_t batch_rule>
  28090. at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) {
  28091. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28092. auto maybe_layer = maybeCurrentDynamicLayer();
  28093. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28094. int64_t cur_level = maybe_layer->layerId();
  28095. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) {
  28096. return at::_ops::_nested_tensor_softmax_with_shape::call(self, query);
  28097. }
  28098. Tensor self_value;
  28099. optional<int64_t> self_bdim;
  28100. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28101. Tensor query_value;
  28102. optional<int64_t> query_bdim;
  28103. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28104. auto results = batch_rule(self_value, self_bdim, query_value, query_bdim);
  28105. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28106. }
  28107. template <typename batch_rule_t, batch_rule_t batch_rule>
  28108. at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
  28109. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28110. auto maybe_layer = maybeCurrentDynamicLayer();
  28111. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28112. int64_t cur_level = maybe_layer->layerId();
  28113. if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  28114. return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
  28115. }
  28116. Tensor src_value;
  28117. optional<int64_t> src_bdim;
  28118. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  28119. Tensor qkv_weight_value;
  28120. optional<int64_t> qkv_weight_bdim;
  28121. std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
  28122. Tensor qkv_bias_value;
  28123. optional<int64_t> qkv_bias_bdim;
  28124. std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  28125. Tensor proj_weight_value;
  28126. optional<int64_t> proj_weight_bdim;
  28127. std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
  28128. Tensor proj_bias_value;
  28129. optional<int64_t> proj_bias_bdim;
  28130. std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
  28131. Tensor norm_weight_1_value;
  28132. optional<int64_t> norm_weight_1_bdim;
  28133. std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
  28134. Tensor norm_bias_1_value;
  28135. optional<int64_t> norm_bias_1_bdim;
  28136. std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
  28137. Tensor norm_weight_2_value;
  28138. optional<int64_t> norm_weight_2_bdim;
  28139. std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
  28140. Tensor norm_bias_2_value;
  28141. optional<int64_t> norm_bias_2_bdim;
  28142. std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
  28143. Tensor ffn_weight_1_value;
  28144. optional<int64_t> ffn_weight_1_bdim;
  28145. std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
  28146. Tensor ffn_bias_1_value;
  28147. optional<int64_t> ffn_bias_1_bdim;
  28148. std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
  28149. Tensor ffn_weight_2_value;
  28150. optional<int64_t> ffn_weight_2_bdim;
  28151. std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
  28152. Tensor ffn_bias_2_value;
  28153. optional<int64_t> ffn_bias_2_bdim;
  28154. std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
  28155. optional<Tensor> mask_value;
  28156. optional<int64_t> mask_bdim;
  28157. if (mask) {
  28158. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  28159. }
  28160. auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type);
  28161. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28162. }
  28163. template <typename batch_rule_t, batch_rule_t batch_rule>
  28164. ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
  28165. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28166. auto maybe_layer = maybeCurrentDynamicLayer();
  28167. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28168. int64_t cur_level = maybe_layer->layerId();
  28169. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  28170. return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
  28171. }
  28172. Tensor query_value;
  28173. optional<int64_t> query_bdim;
  28174. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28175. Tensor key_value;
  28176. optional<int64_t> key_bdim;
  28177. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28178. Tensor value_value;
  28179. optional<int64_t> value_bdim;
  28180. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28181. Tensor qkv_weight_value;
  28182. optional<int64_t> qkv_weight_bdim;
  28183. std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
  28184. Tensor qkv_bias_value;
  28185. optional<int64_t> qkv_bias_bdim;
  28186. std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  28187. Tensor proj_weight_value;
  28188. optional<int64_t> proj_weight_bdim;
  28189. std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
  28190. Tensor proj_bias_value;
  28191. optional<int64_t> proj_bias_bdim;
  28192. std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
  28193. optional<Tensor> mask_value;
  28194. optional<int64_t> mask_bdim;
  28195. if (mask) {
  28196. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  28197. }
  28198. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type);
  28199. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  28200. }
  28201. template <typename batch_rule_t, batch_rule_t batch_rule>
  28202. at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
  28203. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28204. auto maybe_layer = maybeCurrentDynamicLayer();
  28205. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28206. int64_t cur_level = maybe_layer->layerId();
  28207. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
  28208. return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal);
  28209. }
  28210. Tensor query_value;
  28211. optional<int64_t> query_bdim;
  28212. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28213. Tensor key_value;
  28214. optional<int64_t> key_bdim;
  28215. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28216. Tensor value_value;
  28217. optional<int64_t> value_bdim;
  28218. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28219. optional<Tensor> attn_mask_value;
  28220. optional<int64_t> attn_mask_bdim;
  28221. if (attn_mask) {
  28222. std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  28223. }
  28224. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal);
  28225. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28226. }
  28227. template <typename batch_rule_t, batch_rule_t batch_rule>
  28228. ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
  28229. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28230. auto maybe_layer = maybeCurrentDynamicLayer();
  28231. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28232. int64_t cur_level = maybe_layer->layerId();
  28233. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
  28234. return at::_ops::_scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
  28235. }
  28236. Tensor query_value;
  28237. optional<int64_t> query_bdim;
  28238. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28239. Tensor key_value;
  28240. optional<int64_t> key_bdim;
  28241. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28242. Tensor value_value;
  28243. optional<int64_t> value_bdim;
  28244. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28245. optional<Tensor> attn_mask_value;
  28246. optional<int64_t> attn_mask_bdim;
  28247. if (attn_mask) {
  28248. std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  28249. }
  28250. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, need_attn_weights, is_causal);
  28251. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  28252. }
  28253. template <typename batch_rule_t, batch_rule_t batch_rule>
  28254. ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
  28255. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28256. auto maybe_layer = maybeCurrentDynamicLayer();
  28257. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28258. int64_t cur_level = maybe_layer->layerId();
  28259. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) {
  28260. return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
  28261. }
  28262. Tensor query_value;
  28263. optional<int64_t> query_bdim;
  28264. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28265. Tensor key_value;
  28266. optional<int64_t> key_bdim;
  28267. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28268. Tensor value_value;
  28269. optional<int64_t> value_bdim;
  28270. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28271. optional<Tensor> attn_mask_value;
  28272. optional<int64_t> attn_mask_bdim;
  28273. if (attn_mask) {
  28274. std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
  28275. }
  28276. optional<Tensor> dropout_mask_value;
  28277. optional<int64_t> dropout_mask_bdim;
  28278. if (dropout_mask) {
  28279. std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level);
  28280. }
  28281. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim);
  28282. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  28283. }
  28284. template <typename batch_rule_t, batch_rule_t batch_rule>
  28285. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
  28286. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28287. auto maybe_layer = maybeCurrentDynamicLayer();
  28288. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28289. int64_t cur_level = maybe_layer->layerId();
  28290. if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
  28291. return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
  28292. }
  28293. Tensor grad_out_value;
  28294. optional<int64_t> grad_out_bdim;
  28295. std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  28296. Tensor query_value;
  28297. optional<int64_t> query_bdim;
  28298. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28299. Tensor key_value;
  28300. optional<int64_t> key_bdim;
  28301. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28302. Tensor value_value;
  28303. optional<int64_t> value_bdim;
  28304. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28305. Tensor out_value;
  28306. optional<int64_t> out_bdim;
  28307. std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  28308. Tensor logsumexp_value;
  28309. optional<int64_t> logsumexp_bdim;
  28310. std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  28311. Tensor cum_seq_q_value;
  28312. optional<int64_t> cum_seq_q_bdim;
  28313. std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
  28314. Tensor cum_seq_k_value;
  28315. optional<int64_t> cum_seq_k_bdim;
  28316. std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
  28317. auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
  28318. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  28319. }
  28320. template <typename batch_rule_t, batch_rule_t batch_rule>
  28321. ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool compute_log_sumexp, bool is_causal) {
  28322. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28323. auto maybe_layer = maybeCurrentDynamicLayer();
  28324. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28325. int64_t cur_level = maybe_layer->layerId();
  28326. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) {
  28327. return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, compute_log_sumexp, is_causal);
  28328. }
  28329. Tensor query_value;
  28330. optional<int64_t> query_bdim;
  28331. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28332. Tensor key_value;
  28333. optional<int64_t> key_bdim;
  28334. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28335. Tensor value_value;
  28336. optional<int64_t> value_bdim;
  28337. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28338. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, compute_log_sumexp, is_causal);
  28339. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  28340. }
  28341. template <typename batch_rule_t, batch_rule_t batch_rule>
  28342. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {
  28343. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28344. auto maybe_layer = maybeCurrentDynamicLayer();
  28345. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28346. int64_t cur_level = maybe_layer->layerId();
  28347. if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level)) {
  28348. return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
  28349. }
  28350. Tensor grad_out__value;
  28351. optional<int64_t> grad_out__bdim;
  28352. std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
  28353. Tensor query_value;
  28354. optional<int64_t> query_bdim;
  28355. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28356. Tensor key_value;
  28357. optional<int64_t> key_bdim;
  28358. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28359. Tensor value_value;
  28360. optional<int64_t> value_bdim;
  28361. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28362. Tensor out_value;
  28363. optional<int64_t> out_bdim;
  28364. std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  28365. Tensor logsumexp_value;
  28366. optional<int64_t> logsumexp_bdim;
  28367. std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  28368. auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, is_causal, chunk_grad_outputs);
  28369. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  28370. }
  28371. template <typename batch_rule_t, batch_rule_t batch_rule>
  28372. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
  28373. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28374. auto maybe_layer = maybeCurrentDynamicLayer();
  28375. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28376. int64_t cur_level = maybe_layer->layerId();
  28377. if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
  28378. return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
  28379. }
  28380. Tensor grad_out_value;
  28381. optional<int64_t> grad_out_bdim;
  28382. std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  28383. Tensor query_value;
  28384. optional<int64_t> query_bdim;
  28385. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28386. Tensor key_value;
  28387. optional<int64_t> key_bdim;
  28388. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28389. Tensor value_value;
  28390. optional<int64_t> value_bdim;
  28391. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28392. Tensor out_value;
  28393. optional<int64_t> out_bdim;
  28394. std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  28395. Tensor logsumexp_value;
  28396. optional<int64_t> logsumexp_bdim;
  28397. std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  28398. Tensor cum_seq_q_value;
  28399. optional<int64_t> cum_seq_q_bdim;
  28400. std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
  28401. Tensor cum_seq_k_value;
  28402. optional<int64_t> cum_seq_k_bdim;
  28403. std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
  28404. auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
  28405. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  28406. }
  28407. template <typename batch_rule_t, batch_rule_t batch_rule>
  28408. ::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp, bool causal) {
  28409. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28410. auto maybe_layer = maybeCurrentDynamicLayer();
  28411. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28412. int64_t cur_level = maybe_layer->layerId();
  28413. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level)) {
  28414. return at::_ops::_efficient_attention_forward::call(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal);
  28415. }
  28416. Tensor query_value;
  28417. optional<int64_t> query_bdim;
  28418. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28419. Tensor key_value;
  28420. optional<int64_t> key_bdim;
  28421. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28422. Tensor value_value;
  28423. optional<int64_t> value_bdim;
  28424. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28425. optional<Tensor> cu_seqlens_q_value;
  28426. optional<int64_t> cu_seqlens_q_bdim;
  28427. if (cu_seqlens_q) {
  28428. std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level);
  28429. }
  28430. optional<Tensor> cu_seqlens_k_value;
  28431. optional<int64_t> cu_seqlens_k_bdim;
  28432. if (cu_seqlens_k) {
  28433. std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level);
  28434. }
  28435. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, compute_log_sumexp, causal);
  28436. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  28437. }
  28438. template <typename batch_rule_t, batch_rule_t batch_rule>
  28439. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {
  28440. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28441. auto maybe_layer = maybeCurrentDynamicLayer();
  28442. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28443. int64_t cur_level = maybe_layer->layerId();
  28444. if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level)) {
  28445. return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
  28446. }
  28447. Tensor grad_out__value;
  28448. optional<int64_t> grad_out__bdim;
  28449. std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
  28450. Tensor query_value;
  28451. optional<int64_t> query_bdim;
  28452. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28453. Tensor key_value;
  28454. optional<int64_t> key_bdim;
  28455. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28456. Tensor value_value;
  28457. optional<int64_t> value_bdim;
  28458. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28459. Tensor out_value;
  28460. optional<int64_t> out_bdim;
  28461. std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  28462. Tensor logsumexp_value;
  28463. optional<int64_t> logsumexp_bdim;
  28464. std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  28465. auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, is_causal, chunk_grad_outputs);
  28466. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  28467. }
  28468. template <typename batch_rule_t, batch_rule_t batch_rule>
  28469. at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
  28470. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28471. auto maybe_layer = maybeCurrentDynamicLayer();
  28472. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28473. int64_t cur_level = maybe_layer->layerId();
  28474. if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) {
  28475. return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
  28476. }
  28477. Tensor q_value;
  28478. optional<int64_t> q_bdim;
  28479. std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
  28480. Tensor k_value;
  28481. optional<int64_t> k_bdim;
  28482. std::tie(k_value, k_bdim) = unwrapTensorAtLevel(k, cur_level);
  28483. Tensor v_value;
  28484. optional<int64_t> v_bdim;
  28485. std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
  28486. auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p);
  28487. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28488. }
  28489. template <typename batch_rule_t, batch_rule_t batch_rule>
  28490. at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {
  28491. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28492. auto maybe_layer = maybeCurrentDynamicLayer();
  28493. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28494. int64_t cur_level = maybe_layer->layerId();
  28495. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
  28496. return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
  28497. }
  28498. Tensor query_value;
  28499. optional<int64_t> query_bdim;
  28500. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28501. Tensor key_value;
  28502. optional<int64_t> key_bdim;
  28503. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28504. Tensor value_value;
  28505. optional<int64_t> value_bdim;
  28506. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28507. Tensor qkv_weight_value;
  28508. optional<int64_t> qkv_weight_bdim;
  28509. std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
  28510. Tensor qkv_bias_value;
  28511. optional<int64_t> qkv_bias_bdim;
  28512. std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  28513. Tensor proj_weight_value;
  28514. optional<int64_t> proj_weight_bdim;
  28515. std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
  28516. Tensor proj_bias_value;
  28517. optional<int64_t> proj_bias_bdim;
  28518. std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
  28519. optional<Tensor> mask_value;
  28520. optional<int64_t> mask_bdim;
  28521. if (mask) {
  28522. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  28523. }
  28524. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim);
  28525. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28526. }
  28527. template <typename batch_rule_t, batch_rule_t batch_rule>
  28528. at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) {
  28529. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28530. auto maybe_layer = maybeCurrentDynamicLayer();
  28531. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28532. int64_t cur_level = maybe_layer->layerId();
  28533. if (!isBatchedAtLevel(x, cur_level)) {
  28534. return at::_ops::special_airy_ai::call(x);
  28535. }
  28536. Tensor x_value;
  28537. optional<int64_t> x_bdim;
  28538. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28539. auto results = batch_rule(x_value, x_bdim);
  28540. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28541. }
  28542. template <typename batch_rule_t, batch_rule_t batch_rule>
  28543. ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {
  28544. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28545. auto maybe_layer = maybeCurrentDynamicLayer();
  28546. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28547. int64_t cur_level = maybe_layer->layerId();
  28548. if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(incr_key, cur_level) && !isBatchedAtLevel(incr_value, cur_level)) {
  28549. return at::_ops::_transformer_decoder_only_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
  28550. }
  28551. Tensor src_value;
  28552. optional<int64_t> src_bdim;
  28553. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  28554. Tensor qkv_weight_value;
  28555. optional<int64_t> qkv_weight_bdim;
  28556. std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
  28557. Tensor qkv_bias_value;
  28558. optional<int64_t> qkv_bias_bdim;
  28559. std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  28560. Tensor proj_weight_value;
  28561. optional<int64_t> proj_weight_bdim;
  28562. std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
  28563. Tensor proj_bias_value;
  28564. optional<int64_t> proj_bias_bdim;
  28565. std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
  28566. Tensor norm_weight_1_value;
  28567. optional<int64_t> norm_weight_1_bdim;
  28568. std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
  28569. Tensor norm_bias_1_value;
  28570. optional<int64_t> norm_bias_1_bdim;
  28571. std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
  28572. Tensor norm_weight_2_value;
  28573. optional<int64_t> norm_weight_2_bdim;
  28574. std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
  28575. Tensor norm_bias_2_value;
  28576. optional<int64_t> norm_bias_2_bdim;
  28577. std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
  28578. Tensor ffn_weight_1_value;
  28579. optional<int64_t> ffn_weight_1_bdim;
  28580. std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
  28581. Tensor ffn_bias_1_value;
  28582. optional<int64_t> ffn_bias_1_bdim;
  28583. std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
  28584. Tensor ffn_weight_2_value;
  28585. optional<int64_t> ffn_weight_2_bdim;
  28586. std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
  28587. Tensor ffn_bias_2_value;
  28588. optional<int64_t> ffn_bias_2_bdim;
  28589. std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
  28590. optional<Tensor> mask_value;
  28591. optional<int64_t> mask_bdim;
  28592. if (mask) {
  28593. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  28594. }
  28595. optional<Tensor> incr_key_value;
  28596. optional<int64_t> incr_key_bdim;
  28597. if (incr_key) {
  28598. std::tie(incr_key_value, incr_key_bdim) = unwrapTensorAtLevel(incr_key.value(), cur_level);
  28599. }
  28600. optional<Tensor> incr_value_value;
  28601. optional<int64_t> incr_value_bdim;
  28602. if (incr_value) {
  28603. std::tie(incr_value_value, incr_value_bdim) = unwrapTensorAtLevel(incr_value.value(), cur_level);
  28604. }
  28605. auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, incr_key_value, incr_key_bdim, incr_value_value, incr_value_bdim);
  28606. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
  28607. }
  28608. template <typename batch_rule_t, batch_rule_t batch_rule>
  28609. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_decoder_only_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights) {
  28610. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28611. auto maybe_layer = maybeCurrentDynamicLayer();
  28612. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28613. int64_t cur_level = maybe_layer->layerId();
  28614. if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(incr_key, cur_level) && !isBatchedAtLevel(incr_value, cur_level)) {
  28615. return at::_ops::_native_decoder_only_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
  28616. }
  28617. Tensor query_value;
  28618. optional<int64_t> query_bdim;
  28619. std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  28620. Tensor key_value;
  28621. optional<int64_t> key_bdim;
  28622. std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  28623. Tensor value_value;
  28624. optional<int64_t> value_bdim;
  28625. std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  28626. Tensor qkv_weight_value;
  28627. optional<int64_t> qkv_weight_bdim;
  28628. std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
  28629. Tensor qkv_bias_value;
  28630. optional<int64_t> qkv_bias_bdim;
  28631. std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  28632. Tensor proj_weight_value;
  28633. optional<int64_t> proj_weight_bdim;
  28634. std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
  28635. Tensor proj_bias_value;
  28636. optional<int64_t> proj_bias_bdim;
  28637. std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
  28638. optional<Tensor> mask_value;
  28639. optional<int64_t> mask_bdim;
  28640. if (mask) {
  28641. std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  28642. }
  28643. optional<Tensor> incr_key_value;
  28644. optional<int64_t> incr_key_bdim;
  28645. if (incr_key) {
  28646. std::tie(incr_key_value, incr_key_bdim) = unwrapTensorAtLevel(incr_key.value(), cur_level);
  28647. }
  28648. optional<Tensor> incr_value_value;
  28649. optional<int64_t> incr_value_bdim;
  28650. if (incr_value) {
  28651. std::tie(incr_value_value, incr_value_bdim) = unwrapTensorAtLevel(incr_value.value(), cur_level);
  28652. }
  28653. auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, incr_key_value, incr_key_bdim, incr_value_value, incr_value_bdim, need_weights, average_attn_weights);
  28654. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
  28655. }
  28656. template <typename batch_rule_t, batch_rule_t batch_rule>
  28657. at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) {
  28658. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28659. auto maybe_layer = maybeCurrentDynamicLayer();
  28660. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28661. int64_t cur_level = maybe_layer->layerId();
  28662. if (!isBatchedAtLevel(self, cur_level)) {
  28663. return at::_ops::special_bessel_j0::call(self);
  28664. }
  28665. Tensor self_value;
  28666. optional<int64_t> self_bdim;
  28667. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28668. auto results = batch_rule(self_value, self_bdim);
  28669. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28670. }
  28671. template <typename batch_rule_t, batch_rule_t batch_rule>
  28672. at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) {
  28673. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28674. auto maybe_layer = maybeCurrentDynamicLayer();
  28675. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28676. int64_t cur_level = maybe_layer->layerId();
  28677. if (!isBatchedAtLevel(self, cur_level)) {
  28678. return at::_ops::special_bessel_j1::call(self);
  28679. }
  28680. Tensor self_value;
  28681. optional<int64_t> self_bdim;
  28682. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28683. auto results = batch_rule(self_value, self_bdim);
  28684. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28685. }
  28686. template <typename batch_rule_t, batch_rule_t batch_rule>
  28687. at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) {
  28688. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28689. auto maybe_layer = maybeCurrentDynamicLayer();
  28690. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28691. int64_t cur_level = maybe_layer->layerId();
  28692. if (!isBatchedAtLevel(self, cur_level)) {
  28693. return at::_ops::special_bessel_y0::call(self);
  28694. }
  28695. Tensor self_value;
  28696. optional<int64_t> self_bdim;
  28697. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28698. auto results = batch_rule(self_value, self_bdim);
  28699. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28700. }
  28701. template <typename batch_rule_t, batch_rule_t batch_rule>
  28702. at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) {
  28703. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28704. auto maybe_layer = maybeCurrentDynamicLayer();
  28705. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28706. int64_t cur_level = maybe_layer->layerId();
  28707. if (!isBatchedAtLevel(self, cur_level)) {
  28708. return at::_ops::special_bessel_y1::call(self);
  28709. }
  28710. Tensor self_value;
  28711. optional<int64_t> self_bdim;
  28712. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  28713. auto results = batch_rule(self_value, self_bdim);
  28714. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28715. }
  28716. template <typename batch_rule_t, batch_rule_t batch_rule>
  28717. at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  28718. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28719. auto maybe_layer = maybeCurrentDynamicLayer();
  28720. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28721. int64_t cur_level = maybe_layer->layerId();
  28722. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  28723. return at::_ops::special_chebyshev_polynomial_t::call(x, n);
  28724. }
  28725. Tensor x_value;
  28726. optional<int64_t> x_bdim;
  28727. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28728. Tensor n_value;
  28729. optional<int64_t> n_bdim;
  28730. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28731. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  28732. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28733. }
  28734. template <typename batch_rule_t, batch_rule_t batch_rule>
  28735. at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  28736. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28737. auto maybe_layer = maybeCurrentDynamicLayer();
  28738. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28739. int64_t cur_level = maybe_layer->layerId();
  28740. if (!isBatchedAtLevel(n, cur_level)) {
  28741. return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
  28742. }
  28743. Tensor n_value;
  28744. optional<int64_t> n_bdim;
  28745. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28746. auto results = batch_rule(x, n_value, n_bdim);
  28747. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28748. }
  28749. template <typename batch_rule_t, batch_rule_t batch_rule>
  28750. at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  28751. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28752. auto maybe_layer = maybeCurrentDynamicLayer();
  28753. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28754. int64_t cur_level = maybe_layer->layerId();
  28755. if (!isBatchedAtLevel(x, cur_level)) {
  28756. return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
  28757. }
  28758. Tensor x_value;
  28759. optional<int64_t> x_bdim;
  28760. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28761. auto results = batch_rule(x_value, x_bdim, n);
  28762. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28763. }
  28764. template <typename batch_rule_t, batch_rule_t batch_rule>
  28765. at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  28766. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28767. auto maybe_layer = maybeCurrentDynamicLayer();
  28768. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28769. int64_t cur_level = maybe_layer->layerId();
  28770. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  28771. return at::_ops::special_chebyshev_polynomial_u::call(x, n);
  28772. }
  28773. Tensor x_value;
  28774. optional<int64_t> x_bdim;
  28775. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28776. Tensor n_value;
  28777. optional<int64_t> n_bdim;
  28778. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28779. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  28780. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28781. }
  28782. template <typename batch_rule_t, batch_rule_t batch_rule>
  28783. at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  28784. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28785. auto maybe_layer = maybeCurrentDynamicLayer();
  28786. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28787. int64_t cur_level = maybe_layer->layerId();
  28788. if (!isBatchedAtLevel(n, cur_level)) {
  28789. return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
  28790. }
  28791. Tensor n_value;
  28792. optional<int64_t> n_bdim;
  28793. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28794. auto results = batch_rule(x, n_value, n_bdim);
  28795. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28796. }
  28797. template <typename batch_rule_t, batch_rule_t batch_rule>
  28798. at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  28799. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28800. auto maybe_layer = maybeCurrentDynamicLayer();
  28801. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28802. int64_t cur_level = maybe_layer->layerId();
  28803. if (!isBatchedAtLevel(x, cur_level)) {
  28804. return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
  28805. }
  28806. Tensor x_value;
  28807. optional<int64_t> x_bdim;
  28808. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28809. auto results = batch_rule(x_value, x_bdim, n);
  28810. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28811. }
  28812. template <typename batch_rule_t, batch_rule_t batch_rule>
  28813. at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  28814. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28815. auto maybe_layer = maybeCurrentDynamicLayer();
  28816. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28817. int64_t cur_level = maybe_layer->layerId();
  28818. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  28819. return at::_ops::special_chebyshev_polynomial_v::call(x, n);
  28820. }
  28821. Tensor x_value;
  28822. optional<int64_t> x_bdim;
  28823. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28824. Tensor n_value;
  28825. optional<int64_t> n_bdim;
  28826. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28827. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  28828. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28829. }
  28830. template <typename batch_rule_t, batch_rule_t batch_rule>
  28831. at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  28832. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28833. auto maybe_layer = maybeCurrentDynamicLayer();
  28834. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28835. int64_t cur_level = maybe_layer->layerId();
  28836. if (!isBatchedAtLevel(n, cur_level)) {
  28837. return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
  28838. }
  28839. Tensor n_value;
  28840. optional<int64_t> n_bdim;
  28841. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28842. auto results = batch_rule(x, n_value, n_bdim);
  28843. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28844. }
  28845. template <typename batch_rule_t, batch_rule_t batch_rule>
  28846. at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  28847. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28848. auto maybe_layer = maybeCurrentDynamicLayer();
  28849. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28850. int64_t cur_level = maybe_layer->layerId();
  28851. if (!isBatchedAtLevel(x, cur_level)) {
  28852. return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
  28853. }
  28854. Tensor x_value;
  28855. optional<int64_t> x_bdim;
  28856. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28857. auto results = batch_rule(x_value, x_bdim, n);
  28858. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28859. }
  28860. template <typename batch_rule_t, batch_rule_t batch_rule>
  28861. at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  28862. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28863. auto maybe_layer = maybeCurrentDynamicLayer();
  28864. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28865. int64_t cur_level = maybe_layer->layerId();
  28866. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  28867. return at::_ops::special_chebyshev_polynomial_w::call(x, n);
  28868. }
  28869. Tensor x_value;
  28870. optional<int64_t> x_bdim;
  28871. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28872. Tensor n_value;
  28873. optional<int64_t> n_bdim;
  28874. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28875. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  28876. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28877. }
  28878. template <typename batch_rule_t, batch_rule_t batch_rule>
  28879. at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  28880. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28881. auto maybe_layer = maybeCurrentDynamicLayer();
  28882. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28883. int64_t cur_level = maybe_layer->layerId();
  28884. if (!isBatchedAtLevel(n, cur_level)) {
  28885. return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
  28886. }
  28887. Tensor n_value;
  28888. optional<int64_t> n_bdim;
  28889. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28890. auto results = batch_rule(x, n_value, n_bdim);
  28891. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28892. }
  28893. template <typename batch_rule_t, batch_rule_t batch_rule>
  28894. at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  28895. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28896. auto maybe_layer = maybeCurrentDynamicLayer();
  28897. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28898. int64_t cur_level = maybe_layer->layerId();
  28899. if (!isBatchedAtLevel(x, cur_level)) {
  28900. return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
  28901. }
  28902. Tensor x_value;
  28903. optional<int64_t> x_bdim;
  28904. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28905. auto results = batch_rule(x_value, x_bdim, n);
  28906. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28907. }
  28908. template <typename batch_rule_t, batch_rule_t batch_rule>
  28909. at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  28910. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28911. auto maybe_layer = maybeCurrentDynamicLayer();
  28912. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28913. int64_t cur_level = maybe_layer->layerId();
  28914. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  28915. return at::_ops::special_hermite_polynomial_h::call(x, n);
  28916. }
  28917. Tensor x_value;
  28918. optional<int64_t> x_bdim;
  28919. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28920. Tensor n_value;
  28921. optional<int64_t> n_bdim;
  28922. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28923. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  28924. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28925. }
  28926. template <typename batch_rule_t, batch_rule_t batch_rule>
  28927. at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  28928. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28929. auto maybe_layer = maybeCurrentDynamicLayer();
  28930. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28931. int64_t cur_level = maybe_layer->layerId();
  28932. if (!isBatchedAtLevel(n, cur_level)) {
  28933. return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
  28934. }
  28935. Tensor n_value;
  28936. optional<int64_t> n_bdim;
  28937. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28938. auto results = batch_rule(x, n_value, n_bdim);
  28939. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28940. }
  28941. template <typename batch_rule_t, batch_rule_t batch_rule>
  28942. at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  28943. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28944. auto maybe_layer = maybeCurrentDynamicLayer();
  28945. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28946. int64_t cur_level = maybe_layer->layerId();
  28947. if (!isBatchedAtLevel(x, cur_level)) {
  28948. return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
  28949. }
  28950. Tensor x_value;
  28951. optional<int64_t> x_bdim;
  28952. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28953. auto results = batch_rule(x_value, x_bdim, n);
  28954. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28955. }
  28956. template <typename batch_rule_t, batch_rule_t batch_rule>
  28957. at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  28958. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28959. auto maybe_layer = maybeCurrentDynamicLayer();
  28960. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28961. int64_t cur_level = maybe_layer->layerId();
  28962. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  28963. return at::_ops::special_hermite_polynomial_he::call(x, n);
  28964. }
  28965. Tensor x_value;
  28966. optional<int64_t> x_bdim;
  28967. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  28968. Tensor n_value;
  28969. optional<int64_t> n_bdim;
  28970. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28971. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  28972. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28973. }
  28974. template <typename batch_rule_t, batch_rule_t batch_rule>
  28975. at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  28976. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28977. auto maybe_layer = maybeCurrentDynamicLayer();
  28978. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28979. int64_t cur_level = maybe_layer->layerId();
  28980. if (!isBatchedAtLevel(n, cur_level)) {
  28981. return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
  28982. }
  28983. Tensor n_value;
  28984. optional<int64_t> n_bdim;
  28985. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  28986. auto results = batch_rule(x, n_value, n_bdim);
  28987. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  28988. }
  28989. template <typename batch_rule_t, batch_rule_t batch_rule>
  28990. at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  28991. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  28992. auto maybe_layer = maybeCurrentDynamicLayer();
  28993. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  28994. int64_t cur_level = maybe_layer->layerId();
  28995. if (!isBatchedAtLevel(x, cur_level)) {
  28996. return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
  28997. }
  28998. Tensor x_value;
  28999. optional<int64_t> x_bdim;
  29000. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29001. auto results = batch_rule(x_value, x_bdim, n);
  29002. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29003. }
  29004. template <typename batch_rule_t, batch_rule_t batch_rule>
  29005. at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  29006. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29007. auto maybe_layer = maybeCurrentDynamicLayer();
  29008. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29009. int64_t cur_level = maybe_layer->layerId();
  29010. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  29011. return at::_ops::special_laguerre_polynomial_l::call(x, n);
  29012. }
  29013. Tensor x_value;
  29014. optional<int64_t> x_bdim;
  29015. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29016. Tensor n_value;
  29017. optional<int64_t> n_bdim;
  29018. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29019. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  29020. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29021. }
  29022. template <typename batch_rule_t, batch_rule_t batch_rule>
  29023. at::Tensor special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  29024. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29025. auto maybe_layer = maybeCurrentDynamicLayer();
  29026. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29027. int64_t cur_level = maybe_layer->layerId();
  29028. if (!isBatchedAtLevel(n, cur_level)) {
  29029. return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
  29030. }
  29031. Tensor n_value;
  29032. optional<int64_t> n_bdim;
  29033. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29034. auto results = batch_rule(x, n_value, n_bdim);
  29035. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29036. }
  29037. template <typename batch_rule_t, batch_rule_t batch_rule>
  29038. at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  29039. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29040. auto maybe_layer = maybeCurrentDynamicLayer();
  29041. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29042. int64_t cur_level = maybe_layer->layerId();
  29043. if (!isBatchedAtLevel(x, cur_level)) {
  29044. return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
  29045. }
  29046. Tensor x_value;
  29047. optional<int64_t> x_bdim;
  29048. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29049. auto results = batch_rule(x_value, x_bdim, n);
  29050. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29051. }
  29052. template <typename batch_rule_t, batch_rule_t batch_rule>
  29053. at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  29054. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29055. auto maybe_layer = maybeCurrentDynamicLayer();
  29056. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29057. int64_t cur_level = maybe_layer->layerId();
  29058. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  29059. return at::_ops::special_legendre_polynomial_p::call(x, n);
  29060. }
  29061. Tensor x_value;
  29062. optional<int64_t> x_bdim;
  29063. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29064. Tensor n_value;
  29065. optional<int64_t> n_bdim;
  29066. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29067. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  29068. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29069. }
  29070. template <typename batch_rule_t, batch_rule_t batch_rule>
  29071. at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  29072. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29073. auto maybe_layer = maybeCurrentDynamicLayer();
  29074. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29075. int64_t cur_level = maybe_layer->layerId();
  29076. if (!isBatchedAtLevel(n, cur_level)) {
  29077. return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
  29078. }
  29079. Tensor n_value;
  29080. optional<int64_t> n_bdim;
  29081. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29082. auto results = batch_rule(x, n_value, n_bdim);
  29083. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29084. }
  29085. template <typename batch_rule_t, batch_rule_t batch_rule>
  29086. at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  29087. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29088. auto maybe_layer = maybeCurrentDynamicLayer();
  29089. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29090. int64_t cur_level = maybe_layer->layerId();
  29091. if (!isBatchedAtLevel(x, cur_level)) {
  29092. return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
  29093. }
  29094. Tensor x_value;
  29095. optional<int64_t> x_bdim;
  29096. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29097. auto results = batch_rule(x_value, x_bdim, n);
  29098. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29099. }
  29100. template <typename batch_rule_t, batch_rule_t batch_rule>
  29101. at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
  29102. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29103. auto maybe_layer = maybeCurrentDynamicLayer();
  29104. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29105. int64_t cur_level = maybe_layer->layerId();
  29106. if (!isBatchedAtLevel(self, cur_level)) {
  29107. return at::_ops::special_modified_bessel_i0::call(self);
  29108. }
  29109. Tensor self_value;
  29110. optional<int64_t> self_bdim;
  29111. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29112. auto results = batch_rule(self_value, self_bdim);
  29113. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29114. }
  29115. template <typename batch_rule_t, batch_rule_t batch_rule>
  29116. at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
  29117. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29118. auto maybe_layer = maybeCurrentDynamicLayer();
  29119. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29120. int64_t cur_level = maybe_layer->layerId();
  29121. if (!isBatchedAtLevel(self, cur_level)) {
  29122. return at::_ops::special_modified_bessel_i1::call(self);
  29123. }
  29124. Tensor self_value;
  29125. optional<int64_t> self_bdim;
  29126. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29127. auto results = batch_rule(self_value, self_bdim);
  29128. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29129. }
  29130. template <typename batch_rule_t, batch_rule_t batch_rule>
  29131. at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
  29132. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29133. auto maybe_layer = maybeCurrentDynamicLayer();
  29134. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29135. int64_t cur_level = maybe_layer->layerId();
  29136. if (!isBatchedAtLevel(self, cur_level)) {
  29137. return at::_ops::special_modified_bessel_k0::call(self);
  29138. }
  29139. Tensor self_value;
  29140. optional<int64_t> self_bdim;
  29141. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29142. auto results = batch_rule(self_value, self_bdim);
  29143. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29144. }
  29145. template <typename batch_rule_t, batch_rule_t batch_rule>
  29146. at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
  29147. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29148. auto maybe_layer = maybeCurrentDynamicLayer();
  29149. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29150. int64_t cur_level = maybe_layer->layerId();
  29151. if (!isBatchedAtLevel(self, cur_level)) {
  29152. return at::_ops::special_modified_bessel_k1::call(self);
  29153. }
  29154. Tensor self_value;
  29155. optional<int64_t> self_bdim;
  29156. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29157. auto results = batch_rule(self_value, self_bdim);
  29158. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29159. }
  29160. template <typename batch_rule_t, batch_rule_t batch_rule>
  29161. at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) {
  29162. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29163. auto maybe_layer = maybeCurrentDynamicLayer();
  29164. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29165. int64_t cur_level = maybe_layer->layerId();
  29166. if (!isBatchedAtLevel(x, cur_level)) {
  29167. return at::_ops::special_scaled_modified_bessel_k0::call(x);
  29168. }
  29169. Tensor x_value;
  29170. optional<int64_t> x_bdim;
  29171. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29172. auto results = batch_rule(x_value, x_bdim);
  29173. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29174. }
  29175. template <typename batch_rule_t, batch_rule_t batch_rule>
  29176. at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) {
  29177. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29178. auto maybe_layer = maybeCurrentDynamicLayer();
  29179. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29180. int64_t cur_level = maybe_layer->layerId();
  29181. if (!isBatchedAtLevel(x, cur_level)) {
  29182. return at::_ops::special_scaled_modified_bessel_k1::call(x);
  29183. }
  29184. Tensor x_value;
  29185. optional<int64_t> x_bdim;
  29186. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29187. auto results = batch_rule(x_value, x_bdim);
  29188. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29189. }
  29190. template <typename batch_rule_t, batch_rule_t batch_rule>
  29191. at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  29192. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29193. auto maybe_layer = maybeCurrentDynamicLayer();
  29194. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29195. int64_t cur_level = maybe_layer->layerId();
  29196. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  29197. return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n);
  29198. }
  29199. Tensor x_value;
  29200. optional<int64_t> x_bdim;
  29201. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29202. Tensor n_value;
  29203. optional<int64_t> n_bdim;
  29204. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29205. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  29206. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29207. }
  29208. template <typename batch_rule_t, batch_rule_t batch_rule>
  29209. at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  29210. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29211. auto maybe_layer = maybeCurrentDynamicLayer();
  29212. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29213. int64_t cur_level = maybe_layer->layerId();
  29214. if (!isBatchedAtLevel(n, cur_level)) {
  29215. return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n);
  29216. }
  29217. Tensor n_value;
  29218. optional<int64_t> n_bdim;
  29219. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29220. auto results = batch_rule(x, n_value, n_bdim);
  29221. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29222. }
  29223. template <typename batch_rule_t, batch_rule_t batch_rule>
  29224. at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  29225. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29226. auto maybe_layer = maybeCurrentDynamicLayer();
  29227. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29228. int64_t cur_level = maybe_layer->layerId();
  29229. if (!isBatchedAtLevel(x, cur_level)) {
  29230. return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n);
  29231. }
  29232. Tensor x_value;
  29233. optional<int64_t> x_bdim;
  29234. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29235. auto results = batch_rule(x_value, x_bdim, n);
  29236. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29237. }
  29238. template <typename batch_rule_t, batch_rule_t batch_rule>
  29239. at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  29240. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29241. auto maybe_layer = maybeCurrentDynamicLayer();
  29242. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29243. int64_t cur_level = maybe_layer->layerId();
  29244. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  29245. return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
  29246. }
  29247. Tensor x_value;
  29248. optional<int64_t> x_bdim;
  29249. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29250. Tensor n_value;
  29251. optional<int64_t> n_bdim;
  29252. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29253. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  29254. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29255. }
  29256. template <typename batch_rule_t, batch_rule_t batch_rule>
  29257. at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  29258. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29259. auto maybe_layer = maybeCurrentDynamicLayer();
  29260. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29261. int64_t cur_level = maybe_layer->layerId();
  29262. if (!isBatchedAtLevel(n, cur_level)) {
  29263. return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
  29264. }
  29265. Tensor n_value;
  29266. optional<int64_t> n_bdim;
  29267. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29268. auto results = batch_rule(x, n_value, n_bdim);
  29269. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29270. }
  29271. template <typename batch_rule_t, batch_rule_t batch_rule>
  29272. at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  29273. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29274. auto maybe_layer = maybeCurrentDynamicLayer();
  29275. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29276. int64_t cur_level = maybe_layer->layerId();
  29277. if (!isBatchedAtLevel(x, cur_level)) {
  29278. return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
  29279. }
  29280. Tensor x_value;
  29281. optional<int64_t> x_bdim;
  29282. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29283. auto results = batch_rule(x_value, x_bdim, n);
  29284. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29285. }
  29286. template <typename batch_rule_t, batch_rule_t batch_rule>
  29287. at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  29288. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29289. auto maybe_layer = maybeCurrentDynamicLayer();
  29290. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29291. int64_t cur_level = maybe_layer->layerId();
  29292. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  29293. return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n);
  29294. }
  29295. Tensor x_value;
  29296. optional<int64_t> x_bdim;
  29297. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29298. Tensor n_value;
  29299. optional<int64_t> n_bdim;
  29300. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29301. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  29302. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29303. }
  29304. template <typename batch_rule_t, batch_rule_t batch_rule>
  29305. at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  29306. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29307. auto maybe_layer = maybeCurrentDynamicLayer();
  29308. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29309. int64_t cur_level = maybe_layer->layerId();
  29310. if (!isBatchedAtLevel(n, cur_level)) {
  29311. return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n);
  29312. }
  29313. Tensor n_value;
  29314. optional<int64_t> n_bdim;
  29315. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29316. auto results = batch_rule(x, n_value, n_bdim);
  29317. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29318. }
  29319. template <typename batch_rule_t, batch_rule_t batch_rule>
  29320. at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  29321. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29322. auto maybe_layer = maybeCurrentDynamicLayer();
  29323. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29324. int64_t cur_level = maybe_layer->layerId();
  29325. if (!isBatchedAtLevel(x, cur_level)) {
  29326. return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
  29327. }
  29328. Tensor x_value;
  29329. optional<int64_t> x_bdim;
  29330. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29331. auto results = batch_rule(x_value, x_bdim, n);
  29332. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29333. }
  29334. template <typename batch_rule_t, batch_rule_t batch_rule>
  29335. at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  29336. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29337. auto maybe_layer = maybeCurrentDynamicLayer();
  29338. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29339. int64_t cur_level = maybe_layer->layerId();
  29340. if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
  29341. return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
  29342. }
  29343. Tensor x_value;
  29344. optional<int64_t> x_bdim;
  29345. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29346. Tensor n_value;
  29347. optional<int64_t> n_bdim;
  29348. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29349. auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  29350. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29351. }
  29352. template <typename batch_rule_t, batch_rule_t batch_rule>
  29353. at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  29354. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29355. auto maybe_layer = maybeCurrentDynamicLayer();
  29356. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29357. int64_t cur_level = maybe_layer->layerId();
  29358. if (!isBatchedAtLevel(n, cur_level)) {
  29359. return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
  29360. }
  29361. Tensor n_value;
  29362. optional<int64_t> n_bdim;
  29363. std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  29364. auto results = batch_rule(x, n_value, n_bdim);
  29365. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29366. }
  29367. template <typename batch_rule_t, batch_rule_t batch_rule>
  29368. at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  29369. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29370. auto maybe_layer = maybeCurrentDynamicLayer();
  29371. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29372. int64_t cur_level = maybe_layer->layerId();
  29373. if (!isBatchedAtLevel(x, cur_level)) {
  29374. return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
  29375. }
  29376. Tensor x_value;
  29377. optional<int64_t> x_bdim;
  29378. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29379. auto results = batch_rule(x_value, x_bdim, n);
  29380. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29381. }
  29382. template <typename batch_rule_t, batch_rule_t batch_rule>
  29383. at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) {
  29384. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29385. auto maybe_layer = maybeCurrentDynamicLayer();
  29386. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29387. int64_t cur_level = maybe_layer->layerId();
  29388. if (!isBatchedAtLevel(x, cur_level)) {
  29389. return at::_ops::special_spherical_bessel_j0::call(x);
  29390. }
  29391. Tensor x_value;
  29392. optional<int64_t> x_bdim;
  29393. std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  29394. auto results = batch_rule(x_value, x_bdim);
  29395. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29396. }
  29397. template <typename batch_rule_t, batch_rule_t batch_rule>
  29398. at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
  29399. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29400. auto maybe_layer = maybeCurrentDynamicLayer();
  29401. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29402. int64_t cur_level = maybe_layer->layerId();
  29403. if (!isBatchedAtLevel(self, cur_level)) {
  29404. return at::_ops::_foobar::call(self, arg1, arg2, arg3);
  29405. }
  29406. Tensor self_value;
  29407. optional<int64_t> self_bdim;
  29408. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29409. auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3);
  29410. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29411. }
  29412. template <typename batch_rule_t, batch_rule_t batch_rule>
  29413. void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  29414. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29415. auto maybe_layer = maybeCurrentDynamicLayer();
  29416. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29417. int64_t cur_level = maybe_layer->layerId();
  29418. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
  29419. return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  29420. }
  29421. optional<Tensor> grad_scale_value;
  29422. optional<int64_t> grad_scale_bdim;
  29423. if (grad_scale) {
  29424. std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  29425. }
  29426. optional<Tensor> found_inf_value;
  29427. optional<int64_t> found_inf_bdim;
  29428. if (found_inf) {
  29429. std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  29430. }
  29431. batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  29432. }
  29433. template <typename batch_rule_t, batch_rule_t batch_rule>
  29434. void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  29435. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29436. auto maybe_layer = maybeCurrentDynamicLayer();
  29437. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29438. int64_t cur_level = maybe_layer->layerId();
  29439. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
  29440. return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  29441. }
  29442. optional<Tensor> grad_scale_value;
  29443. optional<int64_t> grad_scale_bdim;
  29444. if (grad_scale) {
  29445. std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  29446. }
  29447. optional<Tensor> found_inf_value;
  29448. optional<int64_t> found_inf_bdim;
  29449. if (found_inf) {
  29450. std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  29451. }
  29452. batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  29453. }
  29454. template <typename batch_rule_t, batch_rule_t batch_rule>
  29455. void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  29456. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29457. auto maybe_layer = maybeCurrentDynamicLayer();
  29458. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29459. int64_t cur_level = maybe_layer->layerId();
  29460. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
  29461. return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
  29462. }
  29463. Tensor input_value;
  29464. optional<int64_t> input_bdim;
  29465. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  29466. Tensor weight_buf_value;
  29467. optional<int64_t> weight_buf_bdim;
  29468. std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  29469. Tensor hx_value;
  29470. optional<int64_t> hx_bdim;
  29471. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  29472. Tensor output_value;
  29473. optional<int64_t> output_bdim;
  29474. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  29475. Tensor reserve_value;
  29476. optional<int64_t> reserve_bdim;
  29477. std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  29478. Tensor out0_value;
  29479. optional<int64_t> out0_bdim;
  29480. std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
  29481. Tensor out1_value;
  29482. optional<int64_t> out1_bdim;
  29483. std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
  29484. Tensor out2_value;
  29485. optional<int64_t> out2_bdim;
  29486. std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
  29487. optional<Tensor> cx_value;
  29488. optional<int64_t> cx_bdim;
  29489. if (cx) {
  29490. std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  29491. }
  29492. optional<Tensor> grad_output_value;
  29493. optional<int64_t> grad_output_bdim;
  29494. if (grad_output) {
  29495. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  29496. }
  29497. optional<Tensor> grad_hy_value;
  29498. optional<int64_t> grad_hy_bdim;
  29499. if (grad_hy) {
  29500. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  29501. }
  29502. optional<Tensor> grad_cy_value;
  29503. optional<int64_t> grad_cy_bdim;
  29504. if (grad_cy) {
  29505. std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  29506. }
  29507. optional<Tensor> dropout_state_value;
  29508. optional<int64_t> dropout_state_bdim;
  29509. if (dropout_state) {
  29510. std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  29511. }
  29512. batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
  29513. }
  29514. template <typename batch_rule_t, batch_rule_t batch_rule>
  29515. at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
  29516. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29517. auto maybe_layer = maybeCurrentDynamicLayer();
  29518. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29519. int64_t cur_level = maybe_layer->layerId();
  29520. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
  29521. return at::_ops::bernoulli_Tensor::call(self, p, generator);
  29522. }
  29523. Tensor self_value;
  29524. optional<int64_t> self_bdim;
  29525. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29526. Tensor p_value;
  29527. optional<int64_t> p_bdim;
  29528. std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
  29529. auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
  29530. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29531. }
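// For ops with a single Tensor result (like bernoulli_Tensor above), the batch rule is
// expected to return a tuple whose first element is the result tensor and whose second is
// its batch-dim index (std::get<0>/std::get<1>); makeBatched then re-wraps that pair as a
// batched tensor at cur_level before it is returned to the caller.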
  29532. template <typename batch_rule_t, batch_rule_t batch_rule>
  29533. at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  29534. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29535. auto maybe_layer = maybeCurrentDynamicLayer();
  29536. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29537. int64_t cur_level = maybe_layer->layerId();
  29538. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
  29539. return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
  29540. }
  29541. Tensor self_value;
  29542. optional<int64_t> self_bdim;
  29543. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29544. Tensor indices_value;
  29545. optional<int64_t> indices_bdim;
  29546. std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  29547. auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
  29548. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29549. }
  29550. template <typename batch_rule_t, batch_rule_t batch_rule>
  29551. at::Tensor resize_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  29552. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29553. auto maybe_layer = maybeCurrentDynamicLayer();
  29554. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29555. int64_t cur_level = maybe_layer->layerId();
  29556. if (!isBatchedAtLevel(self, cur_level)) {
  29557. return at::_ops::resize::call(self, size, memory_format);
  29558. }
  29559. Tensor self_value;
  29560. optional<int64_t> self_bdim;
  29561. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29562. auto results = batch_rule(self_value, self_bdim, size, memory_format);
  29563. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29564. }
  29565. template <typename batch_rule_t, batch_rule_t batch_rule>
  29566. at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
  29567. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29568. auto maybe_layer = maybeCurrentDynamicLayer();
  29569. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29570. int64_t cur_level = maybe_layer->layerId();
  29571. if (!isBatchedAtLevel(self, cur_level)) {
  29572. return at::_ops::_resize_output::call(self, size, device);
  29573. }
  29574. Tensor self_value;
  29575. optional<int64_t> self_bdim;
  29576. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29577. auto results = batch_rule(self_value, self_bdim, size, device);
  29578. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29579. }
  29580. template <typename batch_rule_t, batch_rule_t batch_rule>
  29581. at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
  29582. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29583. auto maybe_layer = maybeCurrentDynamicLayer();
  29584. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29585. int64_t cur_level = maybe_layer->layerId();
  29586. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
  29587. return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
  29588. }
  29589. Tensor self_value;
  29590. optional<int64_t> self_bdim;
  29591. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29592. Tensor values_value;
  29593. optional<int64_t> values_bdim;
  29594. std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  29595. auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
  29596. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29597. }
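// _index_put_impl is representative of ops taking a c10::List<c10::optional<Tensor>>: the
// indices list is not unwrapped by the plumbing; it is passed straight through to the batch
// rule, which must handle any batched entries it contains itself.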
  29598. template <typename batch_rule_t, batch_rule_t batch_rule>
  29599. void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  29600. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29601. auto maybe_layer = maybeCurrentDynamicLayer();
  29602. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29603. int64_t cur_level = maybe_layer->layerId();
  29604. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
  29605. return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
  29606. }
  29607. Tensor input_value;
  29608. optional<int64_t> input_bdim;
  29609. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  29610. Tensor weight_buf_value;
  29611. optional<int64_t> weight_buf_bdim;
  29612. std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  29613. Tensor hx_value;
  29614. optional<int64_t> hx_bdim;
  29615. std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  29616. Tensor output_value;
  29617. optional<int64_t> output_bdim;
  29618. std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  29619. Tensor reserve_value;
  29620. optional<int64_t> reserve_bdim;
  29621. std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  29622. Tensor out0_value;
  29623. optional<int64_t> out0_bdim;
  29624. std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
  29625. Tensor out1_value;
  29626. optional<int64_t> out1_bdim;
  29627. std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
  29628. Tensor out2_value;
  29629. optional<int64_t> out2_bdim;
  29630. std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
  29631. optional<Tensor> cx_value;
  29632. optional<int64_t> cx_bdim;
  29633. if (cx) {
  29634. std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  29635. }
  29636. optional<Tensor> grad_output_value;
  29637. optional<int64_t> grad_output_bdim;
  29638. if (grad_output) {
  29639. std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  29640. }
  29641. optional<Tensor> grad_hy_value;
  29642. optional<int64_t> grad_hy_bdim;
  29643. if (grad_hy) {
  29644. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  29645. }
  29646. optional<Tensor> grad_cy_value;
  29647. optional<int64_t> grad_cy_bdim;
  29648. if (grad_cy) {
  29649. std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  29650. }
  29651. optional<Tensor> dropout_state_value;
  29652. optional<int64_t> dropout_state_bdim;
  29653. if (dropout_state) {
  29654. std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  29655. }
  29656. batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
  29657. }
  29658. template <typename batch_rule_t, batch_rule_t batch_rule>
  29659. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
  29660. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29661. auto maybe_layer = maybeCurrentDynamicLayer();
  29662. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29663. int64_t cur_level = maybe_layer->layerId();
  29664. if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
  29665. return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
  29666. }
  29667. Tensor input_value;
  29668. optional<int64_t> input_bdim;
  29669. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  29670. Tensor running_mean_value;
  29671. optional<int64_t> running_mean_bdim;
  29672. std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level);
  29673. Tensor running_var_value;
  29674. optional<int64_t> running_var_bdim;
  29675. std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level);
  29676. optional<Tensor> weight_value;
  29677. optional<int64_t> weight_bdim;
  29678. if (weight) {
  29679. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  29680. }
  29681. optional<Tensor> bias_value;
  29682. optional<int64_t> bias_bdim;
  29683. if (bias) {
  29684. std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  29685. }
  29686. auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
  29687. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
  29688. }
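// Multi-output ops re-wrap each result individually: the batch rule returns a flattened
// tuple of alternating (Tensor, bdim) entries, and the plumbing consumes them pairwise with
// makeBatched (five outputs here, so tuple slots 0 through 9).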
  29689. template <typename batch_rule_t, batch_rule_t batch_rule>
  29690. void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
  29691. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29692. auto maybe_layer = maybeCurrentDynamicLayer();
  29693. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29694. int64_t cur_level = maybe_layer->layerId();
  29695. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  29696. return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
  29697. }
  29698. Tensor self_value;
  29699. optional<int64_t> self_bdim;
  29700. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29701. batch_rule(self_value, self_bdim, split_size, dim, out);
  29702. }
  29703. template <typename batch_rule_t, batch_rule_t batch_rule>
  29704. void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
  29705. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29706. auto maybe_layer = maybeCurrentDynamicLayer();
  29707. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29708. int64_t cur_level = maybe_layer->layerId();
  29709. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  29710. return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
  29711. }
  29712. Tensor self_value;
  29713. optional<int64_t> self_bdim;
  29714. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29715. batch_rule(self_value, self_bdim, split_sizes, dim, out);
  29716. }
  29717. template <typename batch_rule_t, batch_rule_t batch_rule>
  29718. at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
  29719. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29720. auto maybe_layer = maybeCurrentDynamicLayer();
  29721. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29722. int64_t cur_level = maybe_layer->layerId();
  29723. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
  29724. return at::_ops::resize_as::call(self, the_template, memory_format);
  29725. }
  29726. Tensor self_value;
  29727. optional<int64_t> self_bdim;
  29728. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29729. Tensor the_template_value;
  29730. optional<int64_t> the_template_bdim;
  29731. std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
  29732. auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
  29733. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29734. }
  29735. template <typename batch_rule_t, batch_rule_t batch_rule>
  29736. at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
  29737. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29738. auto maybe_layer = maybeCurrentDynamicLayer();
  29739. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29740. int64_t cur_level = maybe_layer->layerId();
  29741. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
  29742. return at::_ops::resize_as_sparse::call(self, the_template);
  29743. }
  29744. Tensor self_value;
  29745. optional<int64_t> self_bdim;
  29746. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29747. Tensor the_template_value;
  29748. optional<int64_t> the_template_bdim;
  29749. std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
  29750. auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
  29751. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29752. }
  29753. template <typename batch_rule_t, batch_rule_t batch_rule>
  29754. at::Tensor zero_generated_plumbing(const at::Tensor & self) {
  29755. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29756. auto maybe_layer = maybeCurrentDynamicLayer();
  29757. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29758. int64_t cur_level = maybe_layer->layerId();
  29759. if (!isBatchedAtLevel(self, cur_level)) {
  29760. return at::_ops::zero::call(self);
  29761. }
  29762. Tensor self_value;
  29763. optional<int64_t> self_bdim;
  29764. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29765. auto results = batch_rule(self_value, self_bdim);
  29766. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29767. }
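// Illustrative sketch only (hypothetical names, not emitted by the generator): a batch rule
// with the shape zero_generated_plumbing above expects. The plumbing passes the physical
// tensor together with its batch-dim index (or nullopt) and re-wraps whatever the rule
// returns via makeBatched.
//
// static std::tuple<at::Tensor, c10::optional<int64_t>> zero_batch_rule_sketch(
//     const at::Tensor & self, c10::optional<int64_t> self_bdim) {
//   // The functional zero op is elementwise, so the batch dimension is carried through
//   // unchanged while the values are replaced with zeros.
//   return std::make_tuple(at::zeros_like(self), self_bdim);
// }
//
// A FuncTorchBatched kernel could then be instantiated as
//   zero_generated_plumbing<decltype(&zero_batch_rule_sketch), &zero_batch_rule_sketch>
// (the real batch rules and their registration live outside this generated header).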
  29768. template <typename batch_rule_t, batch_rule_t batch_rule>
  29769. at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  29770. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29771. auto maybe_layer = maybeCurrentDynamicLayer();
  29772. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29773. int64_t cur_level = maybe_layer->layerId();
  29774. if (!isBatchedAtLevel(self, cur_level)) {
  29775. return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim);
  29776. }
  29777. Tensor self_value;
  29778. optional<int64_t> self_bdim;
  29779. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29780. auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  29781. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29782. }
  29783. template <typename batch_rule_t, batch_rule_t batch_rule>
  29784. at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  29785. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29786. auto maybe_layer = maybeCurrentDynamicLayer();
  29787. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29788. int64_t cur_level = maybe_layer->layerId();
  29789. if (!isBatchedAtLevel(self, cur_level)) {
  29790. return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim);
  29791. }
  29792. Tensor self_value;
  29793. optional<int64_t> self_bdim;
  29794. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29795. auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  29796. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29797. }
  29798. template <typename batch_rule_t, batch_rule_t batch_rule>
  29799. at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) {
  29800. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29801. auto maybe_layer = maybeCurrentDynamicLayer();
  29802. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29803. int64_t cur_level = maybe_layer->layerId();
  29804. if (!isBatchedAtLevel(self, cur_level)) {
  29805. return at::_ops::_coalesced::call(self, coalesced);
  29806. }
  29807. Tensor self_value;
  29808. optional<int64_t> self_bdim;
  29809. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29810. auto results = batch_rule(self_value, self_bdim, coalesced);
  29811. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29812. }
  29813. template <typename batch_rule_t, batch_rule_t batch_rule>
  29814. at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  29815. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29816. auto maybe_layer = maybeCurrentDynamicLayer();
  29817. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29818. int64_t cur_level = maybe_layer->layerId();
  29819. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
  29820. return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking);
  29821. }
  29822. Tensor self_value;
  29823. optional<int64_t> self_bdim;
  29824. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29825. Tensor src_value;
  29826. optional<int64_t> src_bdim;
  29827. std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  29828. auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  29829. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29830. }
  29831. template <typename batch_rule_t, batch_rule_t batch_rule>
  29832. void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
  29833. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29834. auto maybe_layer = maybeCurrentDynamicLayer();
  29835. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29836. int64_t cur_level = maybe_layer->layerId();
  29837. if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  29838. return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
  29839. }
  29840. Tensor scales_value;
  29841. optional<int64_t> scales_bdim;
  29842. std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
  29843. Tensor zero_points_value;
  29844. optional<int64_t> zero_points_bdim;
  29845. std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
  29846. batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out);
  29847. }
  29848. template <typename batch_rule_t, batch_rule_t batch_rule>
  29849. void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) {
  29850. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29851. auto maybe_layer = maybeCurrentDynamicLayer();
  29852. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29853. int64_t cur_level = maybe_layer->layerId();
  29854. if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  29855. return at::_ops::dequantize_tensors_out::call(tensors, out);
  29856. }
  29857. batch_rule(tensors, out);
  29858. }
  29859. template <typename batch_rule_t, batch_rule_t batch_rule>
  29860. ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  29861. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29862. auto maybe_layer = maybeCurrentDynamicLayer();
  29863. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29864. int64_t cur_level = maybe_layer->layerId();
  29865. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
  29866. return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  29867. }
  29868. Tensor self_value;
  29869. optional<int64_t> self_bdim;
  29870. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29871. Tensor observer_on_value;
  29872. optional<int64_t> observer_on_bdim;
  29873. std::tie(observer_on_value, observer_on_bdim) = unwrapTensorAtLevel(observer_on, cur_level);
  29874. Tensor fake_quant_on_value;
  29875. optional<int64_t> fake_quant_on_bdim;
  29876. std::tie(fake_quant_on_value, fake_quant_on_bdim) = unwrapTensorAtLevel(fake_quant_on, cur_level);
  29877. Tensor running_min_value;
  29878. optional<int64_t> running_min_bdim;
  29879. std::tie(running_min_value, running_min_bdim) = unwrapTensorAtLevel(running_min, cur_level);
  29880. Tensor running_max_value;
  29881. optional<int64_t> running_max_bdim;
  29882. std::tie(running_max_value, running_max_bdim) = unwrapTensorAtLevel(running_max, cur_level);
  29883. Tensor scale_value;
  29884. optional<int64_t> scale_bdim;
  29885. std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
  29886. Tensor zero_point_value;
  29887. optional<int64_t> zero_point_bdim;
  29888. std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
  29889. auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  29890. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
  29891. }
  29892. template <typename batch_rule_t, batch_rule_t batch_rule>
  29893. void lstm_mps_backward_out_generated_plumbing(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
  29894. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29895. auto maybe_layer = maybeCurrentDynamicLayer();
  29896. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  29897. int64_t cur_level = maybe_layer->layerId();
  29898. if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) {
  29899. return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
  29900. }
  29901. Tensor grad_y_value;
  29902. optional<int64_t> grad_y_bdim;
  29903. std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y, cur_level);
  29904. Tensor z_state_value;
  29905. optional<int64_t> z_state_bdim;
  29906. std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
  29907. Tensor cell_state_fwd_value;
  29908. optional<int64_t> cell_state_fwd_bdim;
  29909. std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
  29910. Tensor input_value;
  29911. optional<int64_t> input_bdim;
  29912. std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  29913. Tensor layersOutputs_value;
  29914. optional<int64_t> layersOutputs_bdim;
  29915. std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level);
  29916. Tensor out0_value;
  29917. optional<int64_t> out0_bdim;
  29918. std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
  29919. optional<Tensor> grad_hy_value;
  29920. optional<int64_t> grad_hy_bdim;
  29921. if (grad_hy) {
  29922. std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  29923. }
  29924. optional<Tensor> grad_cy_value;
  29925. optional<int64_t> grad_cy_bdim;
  29926. if (grad_cy) {
  29927. std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  29928. }
  29929. batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_value, out0_bdim, out1, out2);
  29930. }
  29931. template <typename batch_rule_t, batch_rule_t batch_rule>
  29932. at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) {
  29933. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29934. auto maybe_layer = maybeCurrentDynamicLayer();
  29935. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29936. int64_t cur_level = maybe_layer->layerId();
  29937. if (!isBatchedAtLevel(self, cur_level)) {
  29938. return at::_ops::set_source_Storage::call(self, source);
  29939. }
  29940. Tensor self_value;
  29941. optional<int64_t> self_bdim;
  29942. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29943. auto results = batch_rule(self_value, self_bdim, source);
  29944. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29945. }
  29946. template <typename batch_rule_t, batch_rule_t batch_rule>
  29947. at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  29948. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29949. auto maybe_layer = maybeCurrentDynamicLayer();
  29950. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29951. int64_t cur_level = maybe_layer->layerId();
  29952. if (!isBatchedAtLevel(self, cur_level)) {
  29953. return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
  29954. }
  29955. Tensor self_value;
  29956. optional<int64_t> self_bdim;
  29957. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29958. auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
  29959. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29960. }
  29961. template <typename batch_rule_t, batch_rule_t batch_rule>
  29962. at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) {
  29963. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29964. auto maybe_layer = maybeCurrentDynamicLayer();
  29965. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29966. int64_t cur_level = maybe_layer->layerId();
  29967. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
  29968. return at::_ops::set_source_Tensor::call(self, source);
  29969. }
  29970. Tensor self_value;
  29971. optional<int64_t> self_bdim;
  29972. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29973. Tensor source_value;
  29974. optional<int64_t> source_bdim;
  29975. std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  29976. auto results = batch_rule(self_value, self_bdim, source_value, source_bdim);
  29977. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29978. }
  29979. template <typename batch_rule_t, batch_rule_t batch_rule>
  29980. at::Tensor set_generated_plumbing(const at::Tensor & self) {
  29981. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29982. auto maybe_layer = maybeCurrentDynamicLayer();
  29983. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29984. int64_t cur_level = maybe_layer->layerId();
  29985. if (!isBatchedAtLevel(self, cur_level)) {
  29986. return at::_ops::set::call(self);
  29987. }
  29988. Tensor self_value;
  29989. optional<int64_t> self_bdim;
  29990. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  29991. auto results = batch_rule(self_value, self_bdim);
  29992. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  29993. }
  29994. template <typename batch_rule_t, batch_rule_t batch_rule>
  29995. at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
  29996. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  29997. auto maybe_layer = maybeCurrentDynamicLayer();
  29998. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  29999. int64_t cur_level = maybe_layer->layerId();
  30000. if (!isBatchedAtLevel(self, cur_level)) {
  30001. return at::_ops::random_from::call(self, from, to, generator);
  30002. }
  30003. Tensor self_value;
  30004. optional<int64_t> self_bdim;
  30005. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30006. auto results = batch_rule(self_value, self_bdim, from, to, generator);
  30007. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30008. }
  30009. template <typename batch_rule_t, batch_rule_t batch_rule>
  30010. at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
  30011. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30012. auto maybe_layer = maybeCurrentDynamicLayer();
  30013. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30014. int64_t cur_level = maybe_layer->layerId();
  30015. if (!isBatchedAtLevel(self, cur_level)) {
  30016. return at::_ops::random_to::call(self, to, generator);
  30017. }
  30018. Tensor self_value;
  30019. optional<int64_t> self_bdim;
  30020. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30021. auto results = batch_rule(self_value, self_bdim, to, generator);
  30022. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30023. }
  30024. template <typename batch_rule_t, batch_rule_t batch_rule>
  30025. at::Tensor random_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
  30026. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30027. auto maybe_layer = maybeCurrentDynamicLayer();
  30028. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30029. int64_t cur_level = maybe_layer->layerId();
  30030. if (!isBatchedAtLevel(self, cur_level)) {
  30031. return at::_ops::random::call(self, generator);
  30032. }
  30033. Tensor self_value;
  30034. optional<int64_t> self_bdim;
  30035. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30036. auto results = batch_rule(self_value, self_bdim, generator);
  30037. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30038. }
  30039. template <typename batch_rule_t, batch_rule_t batch_rule>
  30040. at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
  30041. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30042. auto maybe_layer = maybeCurrentDynamicLayer();
  30043. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30044. int64_t cur_level = maybe_layer->layerId();
  30045. if (!isBatchedAtLevel(self, cur_level)) {
  30046. return at::_ops::uniform::call(self, from, to, generator);
  30047. }
  30048. Tensor self_value;
  30049. optional<int64_t> self_bdim;
  30050. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30051. auto results = batch_rule(self_value, self_bdim, from, to, generator);
  30052. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30053. }
  30054. template <typename batch_rule_t, batch_rule_t batch_rule>
  30055. at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
  30056. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30057. auto maybe_layer = maybeCurrentDynamicLayer();
  30058. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30059. int64_t cur_level = maybe_layer->layerId();
  30060. if (!isBatchedAtLevel(self, cur_level)) {
  30061. return at::_ops::cauchy::call(self, median, sigma, generator);
  30062. }
  30063. Tensor self_value;
  30064. optional<int64_t> self_bdim;
  30065. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30066. auto results = batch_rule(self_value, self_bdim, median, sigma, generator);
  30067. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30068. }
  30069. template <typename batch_rule_t, batch_rule_t batch_rule>
  30070. at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  30071. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30072. auto maybe_layer = maybeCurrentDynamicLayer();
  30073. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30074. int64_t cur_level = maybe_layer->layerId();
  30075. if (!isBatchedAtLevel(self, cur_level)) {
  30076. return at::_ops::log_normal::call(self, mean, std, generator);
  30077. }
  30078. Tensor self_value;
  30079. optional<int64_t> self_bdim;
  30080. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30081. auto results = batch_rule(self_value, self_bdim, mean, std, generator);
  30082. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30083. }
  30084. template <typename batch_rule_t, batch_rule_t batch_rule>
  30085. at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
  30086. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30087. auto maybe_layer = maybeCurrentDynamicLayer();
  30088. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30089. int64_t cur_level = maybe_layer->layerId();
  30090. if (!isBatchedAtLevel(self, cur_level)) {
  30091. return at::_ops::exponential::call(self, lambd, generator);
  30092. }
  30093. Tensor self_value;
  30094. optional<int64_t> self_bdim;
  30095. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30096. auto results = batch_rule(self_value, self_bdim, lambd, generator);
  30097. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30098. }
  30099. template <typename batch_rule_t, batch_rule_t batch_rule>
  30100. at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  30101. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30102. auto maybe_layer = maybeCurrentDynamicLayer();
  30103. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30104. int64_t cur_level = maybe_layer->layerId();
  30105. if (!isBatchedAtLevel(self, cur_level)) {
  30106. return at::_ops::geometric::call(self, p, generator);
  30107. }
  30108. Tensor self_value;
  30109. optional<int64_t> self_bdim;
  30110. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30111. auto results = batch_rule(self_value, self_bdim, p, generator);
  30112. return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
  30113. }
  30114. template <typename batch_rule_t, batch_rule_t batch_rule>
  30115. void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
  30116. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30117. auto maybe_layer = maybeCurrentDynamicLayer();
  30118. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30119. int64_t cur_level = maybe_layer->layerId();
  30120. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30121. return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
  30122. }
  30123. Tensor self_value;
  30124. optional<int64_t> self_bdim;
  30125. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30126. optional<Tensor> weight_value;
  30127. optional<int64_t> weight_bdim;
  30128. if (weight) {
  30129. std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  30130. }
  30131. batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out);
  30132. }
  30133. template <typename batch_rule_t, batch_rule_t batch_rule>
  30134. void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
  30135. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30136. auto maybe_layer = maybeCurrentDynamicLayer();
  30137. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30138. int64_t cur_level = maybe_layer->layerId();
  30139. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30140. return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
  30141. }
  30142. Tensor found_inf_value;
  30143. optional<int64_t> found_inf_bdim;
  30144. std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
  30145. Tensor inv_scale_value;
  30146. optional<int64_t> inv_scale_bdim;
  30147. std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
  30148. batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out);
  30149. }
  30150. template <typename batch_rule_t, batch_rule_t batch_rule>
  30151. ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
  30152. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30153. auto maybe_layer = maybeCurrentDynamicLayer();
  30154. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30155. int64_t cur_level = maybe_layer->layerId();
  30156. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
  30157. return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
  30158. }
  30159. Tensor found_inf_value;
  30160. optional<int64_t> found_inf_bdim;
  30161. std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
  30162. Tensor inv_scale_value;
  30163. optional<int64_t> inv_scale_bdim;
  30164. std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
  30165. auto results = batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
  30166. return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  30167. }
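// When an op returns a ::std::vector<at::Tensor> (as _amp_foreach_non_finite_check_and_unscale
// above does for its first result), the plumbing re-wraps it with makeBatchedVector rather
// than makeBatched; the remaining Tensor results are still re-wrapped one (value, bdim) pair
// at a time.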
  30168. template <typename batch_rule_t, batch_rule_t batch_rule>
  30169. ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
  30170. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30171. auto maybe_layer = maybeCurrentDynamicLayer();
  30172. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  30173. int64_t cur_level = maybe_layer->layerId();
  30174. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
  30175. return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
  30176. }
  30177. Tensor self_value;
  30178. optional<int64_t> self_bdim;
  30179. std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  30180. Tensor growth_tracker_value;
  30181. optional<int64_t> growth_tracker_bdim;
  30182. std::tie(growth_tracker_value, growth_tracker_bdim) = unwrapTensorAtLevel(growth_tracker, cur_level);
  30183. Tensor found_inf_value;
  30184. optional<int64_t> found_inf_bdim;
  30185. std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
  30186. auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval);
  30187. return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
  30188. }
  30189. template <typename batch_rule_t, batch_rule_t batch_rule>
  30190. void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30191. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30192. auto maybe_layer = maybeCurrentDynamicLayer();
  30193. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30194. int64_t cur_level = maybe_layer->layerId();
  30195. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30196. return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
  30197. }
  30198. batch_rule(self, scalar, out);
  30199. }
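// The _foreach_* out-variant plumbings that follow unwrap nothing at all: self and out are
// TensorLists and the Scalar/ScalarList arguments carry no batch information, so everything
// is forwarded to the batch rule unchanged once the all-unbatched fast path has been ruled
// out.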
  30200. template <typename batch_rule_t, batch_rule_t batch_rule>
  30201. void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30202. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30203. auto maybe_layer = maybeCurrentDynamicLayer();
  30204. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30205. int64_t cur_level = maybe_layer->layerId();
  30206. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30207. return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
  30208. }
  30209. batch_rule(self, scalar, out);
  30210. }
  30211. template <typename batch_rule_t, batch_rule_t batch_rule>
  30212. void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30213. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30214. auto maybe_layer = maybeCurrentDynamicLayer();
  30215. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30216. int64_t cur_level = maybe_layer->layerId();
  30217. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30218. return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
  30219. }
  30220. batch_rule(self, scalar, out);
  30221. }
  30222. template <typename batch_rule_t, batch_rule_t batch_rule>
  30223. void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30224. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30225. auto maybe_layer = maybeCurrentDynamicLayer();
  30226. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30227. int64_t cur_level = maybe_layer->layerId();
  30228. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30229. return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
  30230. }
  30231. batch_rule(self, scalar, out);
  30232. }
  30233. template <typename batch_rule_t, batch_rule_t batch_rule>
  30234. void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30235. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30236. auto maybe_layer = maybeCurrentDynamicLayer();
  30237. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30238. int64_t cur_level = maybe_layer->layerId();
  30239. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30240. return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
  30241. }
  30242. batch_rule(self, scalar, out);
  30243. }
  30244. template <typename batch_rule_t, batch_rule_t batch_rule>
  30245. void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30246. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30247. auto maybe_layer = maybeCurrentDynamicLayer();
  30248. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30249. int64_t cur_level = maybe_layer->layerId();
  30250. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30251. return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
  30252. }
  30253. batch_rule(self, scalar, out);
  30254. }
  30255. template <typename batch_rule_t, batch_rule_t batch_rule>
  30256. void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30257. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30258. auto maybe_layer = maybeCurrentDynamicLayer();
  30259. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30260. int64_t cur_level = maybe_layer->layerId();
  30261. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30262. return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
  30263. }
  30264. batch_rule(self, scalar, out);
  30265. }
  30266. template <typename batch_rule_t, batch_rule_t batch_rule>
  30267. void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
  30268. c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  30269. auto maybe_layer = maybeCurrentDynamicLayer();
  30270. vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  30271. int64_t cur_level = maybe_layer->layerId();
  30272. if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
  30273. return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
  30274. }
  30275. batch_rule(self, scalar, out);
  30276. }
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
  }
  batch_rule(self, other, alpha, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
  }
  batch_rule(self, other, alpha, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_mul_List_out::call(self, other, out);
  }
  batch_rule(self, other, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_div_List_out::call(self, other, out);
  }
  batch_rule(self, other, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
  }
  batch_rule(self, other, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
  }
  batch_rule(self, other, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_maximum_List_out::call(self, other, out);
  }
  batch_rule(self, other, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_minimum_List_out::call(self, other, out);
  }
  batch_rule(self, other, out);
}
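// ScalarList out= variants: the per-tensor scalars are plain values and are passed
// through unchanged, so only the input and output tensor lists are checked for batching.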
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
  }
  batch_rule(self, scalars, out);
}
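// Unary _foreach *_out wrappers (exp, sqrt, abs, trig, log*, rounding, ...): one input
// tensor list, one output tensor list, no return value.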
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_exp_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_zero_out::call(self, out);
  }
  batch_rule(self, out);
}
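// _foreach_zero below is the value-returning form: the batch rule yields a tuple of
// (tensors, batch dims), which makeBatchedVector re-wraps into batched tensors at the
// current level before the result is returned.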
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_zero_generated_plumbing(at::TensorList self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_zero::call(self);
  }
  auto results = batch_rule(self);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sqrt_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_abs_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_acos_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_asin_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_atan_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_ceil_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_cos_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_cosh_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_erf_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_erfc_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_expm1_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_floor_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log10_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log1p_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log2_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_neg_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_tan_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_tanh_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sin_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sinh_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_round_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lgamma_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_frac_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_reciprocal_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sigmoid_out::call(self, out);
  }
  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_trunc_out::call(self, out);
  }
  batch_rule(self, out);
}
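// Pointwise ternary variants (addcdiv / addcmul): tensor1 and tensor2 take part in the
// batching check alongside self and out.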
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
  }
  batch_rule(self, tensor1, tensor2, value, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
  }
  batch_rule(self, tensor1, tensor2, value, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
  }
  batch_rule(self, tensor1, tensor2, scalars, out);
}
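// The *_Tensor_out variants receive the scalars as a single Tensor, which must itself be
// unwrapped: unwrapTensorAtLevel splits it into a value and an optional batch dimension
// that are forwarded to the batch rule as separate arguments.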
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
  }
  Tensor scalars_value;
  optional<int64_t> scalars_bdim;
  std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
  }
  batch_rule(self, tensor1, tensor2, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
  }
  Tensor scalars_value;
  optional<int64_t> scalars_bdim;
  std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
  }
  batch_rule(self, ord, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
  }
  batch_rule(self, tensors1, weights, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
  }
  batch_rule(self, tensors1, weight, out);
}
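// Fused optimizer plumbing (_fused_adam / _fused_adamw): the optional grad_scale and
// found_inf tensors are unwrapped only when present; the non-out overloads re-batch all
// five returned tensor lists with makeBatchedVector.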
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}
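// Illustrative sketch only (not generated code): a plumbing template above is typically
// instantiated with a concrete batch rule and registered for the FuncTorchBatched key.
// Assuming a hypothetical batch rule with the matching signature:
//
//   void my_foreach_exp_out_batch_rule(at::TensorList self, at::TensorList out);
//
// the corresponding kernel would be the instantiation
//
//   &at::functorch::_foreach_exp_out_generated_plumbing<
//       decltype(&my_foreach_exp_out_batch_rule), &my_foreach_exp_out_batch_rule>;
//
// How registration is actually wired up lives outside this header.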
}} // namespace at::functorch