430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
- #pragma once
- #include <ATen/Operators.h>
- #include <ATen/functorch/PlumbingHelper.h>
- namespace at { namespace functorch {
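Every plumbing template below follows the same pattern: exclude the
FuncTorchBatched dispatch key, look up the innermost dynamic layer, fall
through to the plain operator when no argument is batched at that level, and
otherwise unwrap each tensor into a (value, optional batch-dim) pair, invoke
the supplied batch_rule, and re-wrap the results at the current level.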
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Byte::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
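A batch rule plugged into one of these templates receives every tensor
argument as a (value, optional batch-dim) pair and returns each output tensor
paired with its batch dim. A minimal hand-written rule matching the signature
_cast_Byte_generated_plumbing expects might look like the sketch below; the
rule name and body are illustrative, not part of the generated header.

    std::tuple<at::Tensor, c10::optional<int64_t>>
    cast_byte_batch_rule(const at::Tensor & self,
                         c10::optional<int64_t> self_bdim,
                         bool non_blocking) {
      // The cast is pointwise, so the input's batch dim carries through.
      return std::make_tuple(self.to(at::kByte, non_blocking), self_bdim);
    }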
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Char::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Double::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Float::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Int::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Long::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Short::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_cast_Half::call(self, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) {
- return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> gradient_value;
- optional<int64_t> gradient_bdim;
- if (gradient) {
- std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level);
- }
- batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) {
- return at::_ops::set_data::call(self, new_data);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor new_data_value;
- optional<int64_t> new_data_bdim;
- std::tie(new_data_value, new_data_bdim) = unwrapTensorAtLevel(new_data, cur_level);
- batch_rule(self_value, self_bdim, new_data_value, new_data_bdim);
- }
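Void operators such as _backward and set_data take the no-returns path:
vmap_check_escaped is called with "gen_vmap_plumbing_no_returns" and the
template forwards to batch_rule without re-wrapping anything. A rule matching
the shape set_data_generated_plumbing expects might look like this sketch
(illustrative; a real rule would also have to reconcile self_bdim with
new_data_bdim, which this sketch ignores):

    void set_data_batch_rule(const at::Tensor & self,
                             c10::optional<int64_t> self_bdim,
                             const at::Tensor & new_data,
                             c10::optional<int64_t> new_data_bdim) {
      // Nothing is returned, so there is no output batch dim to propagate.
      self.set_data(new_data);
    }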
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor data_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::data::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::requires_grad_::call(self, requires_grad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, requires_grad);
- return self;
- }
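In-place variants use "gen_vmap_inplace_plumbing": the rule mutates the
unwrapped value and the template returns the original `self`, so the caller
keeps its batched wrapper. A rule matching requires_grad__generated_plumbing
might look like this sketch (illustrative only):

    void requires_grad__batch_rule(const at::Tensor & self,
                                   c10::optional<int64_t> self_bdim,
                                   bool requires_grad) {
      // Mutate the unwrapped value; the plumbing returns the batched `self`.
      self.requires_grad_(requires_grad);
    }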
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void retain_grad_generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::retain_grad::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_fw_primal::call(self, level);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, level);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
- return at::_ops::_make_dual::call(primal, tangent, level);
- }
- Tensor primal_value;
- optional<int64_t> primal_bdim;
- std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
- Tensor tangent_value;
- optional<int64_t> tangent_bdim;
- std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
- auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(dual, cur_level)) {
- return at::_ops::_unpack_dual::call(dual, level);
- }
- Tensor dual_value;
- optional<int64_t> dual_bdim;
- std::tie(dual_value, dual_bdim) = unwrapTensorAtLevel(dual, cur_level);
- auto results = batch_rule(dual_value, dual_bdim, level);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
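Multi-output operators expect the rule to return an interleaved tuple
(out0, out0_bdim, out1, out1_bdim, ...), which the plumbing re-wraps pairwise
with makeBatched. For _unpack_dual the rule shape is therefore as sketched
below; forwarding the input batch dim to both outputs assumes primal and
tangent share the dual's shape, which is plausible here but unverified:

    std::tuple<at::Tensor, c10::optional<int64_t>,
               at::Tensor, c10::optional<int64_t>>
    unpack_dual_batch_rule(const at::Tensor & dual,
                           c10::optional<int64_t> dual_bdim,
                           int64_t level) {
      auto result = at::_ops::_unpack_dual::call(dual, level);
      // Both outputs keep the dual's layout, so reuse its batch dim.
      return std::make_tuple(std::get<0>(result), dual_bdim,
                             std::get<1>(result), dual_bdim);
    }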
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rename_generated_plumbing(const at::Tensor & self, c10::optional<at::DimnameList> names) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rename::call(self, names);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, names);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::align_to::call(self, names);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, names);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::align_as::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> align_tensors_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::align_tensors::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
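Note the TensorList case: the list is forwarded to the rule still wrapped (the
plumbing performs no per-element unwrapTensorAtLevel), and the rule returns a
(::std::vector<at::Tensor>, optional batch-dim) pair whose single shared batch
dim makeBatchedVector applies to every output. The expected rule shape is
(signature sketch only; unwrapping each element is the rule's responsibility):

    std::tuple<::std::vector<at::Tensor>, c10::optional<int64_t>>
    align_tensors_batch_rule(at::TensorList tensors);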
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _assert_async_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_assert_async::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(a, cur_level)) {
- return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype);
- }
- Tensor a_value;
- optional<int64_t> a_bdim;
- std::tie(a_value, a_bdim) = unwrapTensorAtLevel(a, cur_level);
- batch_rule(a_value, a_bdim, size, stride, dtype);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::refine_names::call(self, names);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, names);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
- return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
- }
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
- return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
- }
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- Tensor input_lengths_value;
- optional<int64_t> input_lengths_bdim;
- std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
- Tensor target_lengths_value;
- optional<int64_t> target_lengths_bdim;
- std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
- auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight_arr, cur_level)) {
- return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
- }
- auto results = batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
- return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- optional<Tensor> weight_buf_value;
- optional<int64_t> weight_buf_bdim;
- if (weight_buf) {
- std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level);
- }
- optional<Tensor> cx_value;
- optional<int64_t> cx_bdim;
- if (cx) {
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
- }
- optional<Tensor> dropout_state_value;
- optional<int64_t> dropout_state_bdim;
- if (dropout_state) {
- std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
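- // Optional tensor arguments (weight_buf, cx, dropout_state above) are only
- // unwrapped when engaged; otherwise both the value and its bdim stay
- // nullopt, and the batch rule must accept that. The matching rule takes the
- // pair as optionals, along the lines of (illustrative fragment, not a real
- // signature):
- //
- //   ..., const optional<Tensor>& cx_value, optional<int64_t> cx_bdim, ...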
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
- return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_buf_value;
- optional<int64_t> weight_buf_bdim;
- std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor reserve_value;
- optional<int64_t> reserve_bdim;
- std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
- optional<Tensor> cx_value;
- optional<int64_t> cx_bdim;
- if (cx) {
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
- }
- optional<Tensor> grad_output_value;
- optional<int64_t> grad_output_bdim;
- if (grad_output) {
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
- }
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- optional<Tensor> dropout_state_value;
- optional<int64_t> dropout_state_bdim;
- if (dropout_state) {
- std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
- }
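- // Ops that return ::std::vector<Tensor> (the per-parameter weight grads
- // above) are re-wrapped with makeBatchedVector, which applies makeBatched
- // to each element using one shared bdim for the whole vector.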
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _fused_dropout_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_fused_dropout::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, generator);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
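- // Batch rules for multi-output ops return one flat tuple interleaving each
- // result with its batch dim: (out0, bdim0, out1, bdim1, ...). That is why
- // the plumbing above indexes std::get<0>/<1> and std::get<2>/<3>. A rough
- // sketch of such a rule for a dropout-like op (hypothetical name; assumes
- // `p` is the keep probability, and uses a naive float mask purely for
- // illustration):
- //
- //   std::tuple<Tensor, optional<int64_t>, Tensor, optional<int64_t>>
- //   fused_dropout_batch_rule(const Tensor& self, optional<int64_t> self_bdim,
- //                            double p, c10::optional<at::Generator> gen) {
- //     auto self_ = moveBatchDimToFront(self, self_bdim);
- //     // Keep each element with probability p, scale survivors by 1/p.
- //     auto mask = at::rand_like(self_).lt_(p);
- //     auto out = self_ * mask.to(self_.scalar_type()) / p;
- //     // self is the only tensor argument, so it is guaranteed batched here
- //     // and both outputs carry their batch dim at the front.
- //     return std::make_tuple(out, 0, mask, 0);
- //   }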
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_masked_scale::call(self, mask, scale);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> native_dropout_generated_plumbing(const at::Tensor & input, double p, c10::optional<bool> train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::native_dropout::call(input, p, train);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, p, train);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
- return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
- }
- Tensor quasi_value;
- optional<int64_t> quasi_bdim;
- std::tie(quasi_value, quasi_bdim) = unwrapTensorAtLevel(quasi, cur_level);
- Tensor sobolstate_value;
- optional<int64_t> sobolstate_bdim;
- std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
- auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
- return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor sobolstate_value;
- optional<int64_t> sobolstate_bdim;
- std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
- batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated);
- return self;
- }
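- // In-place variants (note the "gen_vmap_inplace_plumbing" escape tag) call
- // the batch rule for its side effect only and return the original `self`:
- // the rule mutates the unwrapped physical tensor, which aliases the value
- // held inside the BatchedTensor wrapper, so nothing needs re-wrapping.
- // Sketch of the expected shape (hypothetical name):
- //
- //   void sobol_engine_ff__batch_rule(Tensor& self, optional<int64_t> self_bdim,
- //                                    int64_t n, const Tensor& sobolstate,
- //                                    optional<int64_t> sobolstate_bdim,
- //                                    int64_t dimension, int64_t num_generated);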
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) {
- return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor ltm_value;
- optional<int64_t> ltm_bdim;
- std::tie(ltm_value, ltm_bdim) = unwrapTensorAtLevel(ltm, cur_level);
- batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dimension);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) {
- return at::_ops::_reshape_from_tensor::call(self, shape);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor shape_value;
- optional<int64_t> shape_bdim;
- std::tie(shape_value, shape_bdim) = unwrapTensorAtLevel(shape, cur_level);
- auto results = batch_rule(self_value, self_bdim, shape_value, shape_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_shape_as_tensor::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::dropout::call(input, p, train);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, p, train);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::dropout_::call(self, p, train);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, train);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::feature_dropout::call(input, p, train);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, p, train);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::feature_dropout_::call(self, p, train);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, train);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::alpha_dropout::call(input, p, train);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, p, train);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::alpha_dropout_::call(self, p, train);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, train);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::feature_alpha_dropout::call(input, p, train);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, p, train);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::feature_alpha_dropout_::call(self, p, train);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, train);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor abs_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::abs::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & abs__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::abs_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
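- // Pointwise unary ops need almost no logic in the rule itself: the physical
- // tensor already carries the batch dim and a pointwise op preserves every
- // dim, so the bdim passes straight through. Minimal sketch (hypothetical
- // name; functorch actually derives these from a generic unary helper):
- //
- //   std::tuple<Tensor, optional<int64_t>> abs_batch_rule(
- //       const Tensor& self, optional<int64_t> self_bdim) {
- //     return std::make_tuple(self.abs(), self_bdim);
- //   }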
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor absolute_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::absolute::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & absolute__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::absolute_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor angle_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::angle::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_as_real::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_as_complex::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sgn_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sgn::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sgn__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sgn_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor chalf_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::chalf::call(self, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor real_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::real::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor imag_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::imag::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _conj_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_conj::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conj_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::conj::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_conj_physical::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::conj_physical::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::conj_physical_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::resolve_conj::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::resolve_neg::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_neg_view::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor acos_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::acos::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & acos__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::acos_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arccos_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arccos::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arccos__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arccos_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adaptive_avg_pool1d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adaptive_max_pool1d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::add_Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
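- // Binary ops first have to line the two batch dims up. functorch's
- // moveBatchDimToFront helper moves an existing bdim to dim 0 and is a no-op
- // for an unbatched side, after which ordinary broadcasting applies. The
- // sketch below omits the rank padding a production rule also performs so
- // that the leading batch dim broadcasts correctly against lower-rank
- // operands:
- //
- //   std::tuple<Tensor, optional<int64_t>> add_batch_rule(
- //       const Tensor& self, optional<int64_t> self_bdim,
- //       const Tensor& other, optional<int64_t> other_bdim,
- //       const at::Scalar& alpha) {
- //     auto self_ = moveBatchDimToFront(self, self_bdim);
- //     auto other_ = moveBatchDimToFront(other, other_bdim);
- //     // The plumbing's early return guarantees at least one side is
- //     // batched, so the result's batch dim ends up at the front.
- //     return std::make_tuple(at::add(self_, other_, alpha), 0);
- //   }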
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::add__Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_add_relu_Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_add_relu__Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_add_relu_Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_add_relu__Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::add_Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::add__Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
- return at::_ops::addmv::call(self, mat, vec, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat_value;
- optional<int64_t> mat_bdim;
- std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
- Tensor vec_value;
- optional<int64_t> vec_bdim;
- std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
- return at::_ops::addmv_::call(self, mat, vec, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat_value;
- optional<int64_t> mat_bdim;
- std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
- Tensor vec_value;
- optional<int64_t> vec_bdim;
- std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
- batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
- return at::_ops::addr::call(self, vec1, vec2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor vec1_value;
- optional<int64_t> vec1_bdim;
- std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
- Tensor vec2_value;
- optional<int64_t> vec2_bdim;
- std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
- auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
- return at::_ops::addr_::call(self, vec1, vec2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor vec1_value;
- optional<int64_t> vec1_bdim;
- std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
- Tensor vec2_value;
- optional<int64_t> vec2_bdim;
- std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
- batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(theta, cur_level)) {
- return at::_ops::affine_grid_generator::call(theta, size, align_corners);
- }
- Tensor theta_value;
- optional<int64_t> theta_bdim;
- std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
- auto results = batch_rule(theta_value, theta_bdim, size, align_corners);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level)) {
- return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, size, align_corners);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_is_all_true::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_is_any_true::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_test_check_tensor::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::all_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
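- // Reductions that take a dim argument must translate it from logical
- // (user-visible) to physical coordinates once the batch dim sits at the
- // front. Sketch for all.dim (hypothetical name; real rules share a generic
- // reduction helper):
- //
- //   std::tuple<Tensor, optional<int64_t>> all_dim_batch_rule(
- //       const Tensor& self, optional<int64_t> self_bdim,
- //       int64_t dim, bool keepdim) {
- //     auto self_ = moveBatchDimToFront(self, self_bdim);
- //     auto logical_rank = self_.dim() - 1;  // rank the caller sees
- //     auto phys_dim = c10::maybe_wrap_dim(dim, logical_rank) + 1;
- //     return std::make_tuple(self_.all(phys_dim, keepdim), 0);
- //   }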
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::all_dimname::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::any_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::any_dimname::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(like, cur_level)) {
- return at::_ops::_dim_arange::call(like, dim);
- }
- Tensor like_value;
- optional<int64_t> like_bdim;
- std::tie(like_value, like_bdim) = unwrapTensorAtLevel(like, cur_level);
- auto results = batch_rule(like_value, like_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor argmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::argmax::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor argmin_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::argmin::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor acosh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::acosh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
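- // In-place ('_') variants use the gen_vmap_inplace_plumbing flavor: the
- // batch rule mutates the unwrapped self_value directly, so there is nothing
- // to re-wrap and the wrapper simply returns the original self reference.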
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & acosh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::acosh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arccosh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arccosh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arccosh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arccosh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor asinh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::asinh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & asinh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::asinh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arcsinh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arcsinh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor atanh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atanh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & atanh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atanh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arctanh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arctanh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arctanh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arctanh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
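- // A minimal sketch (not part of the generated header) of the batch rule
- // contract the unary wrappers above expect: take the unwrapped tensor and
- // its optional batch dimension, return the result tensor paired with the
- // result's batch dimension. sin_batch_rule is a hypothetical example,
- // assuming a pointwise op that leaves the batch dimension untouched.
- static std::tuple<at::Tensor, c10::optional<int64_t>>
- sin_batch_rule(const at::Tensor & self, c10::optional<int64_t> self_bdim) {
- // A pointwise op acts independently on every element, so the batch
- // dimension passes through unchanged.
- return std::make_tuple(at::sin(self), self_bdim);
- }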
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::as_strided::call(self, size, stride, storage_offset);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor asin_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::asin::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & asin__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::asin_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arcsin_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arcsin::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arcsin__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arcsin_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor atan_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atan::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & atan__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atan_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arctan_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arctan::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arctan__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::arctan_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor atleast_1d_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atleast_1d::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
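- // TensorList variants skip per-tensor unwrapping: the (possibly batched)
- // list is handed to the batch rule as-is, and the returned vector of
- // tensors is re-wrapped element-wise via makeBatchedVector instead of
- // makeBatched.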
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::atleast_1d_Sequence::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atleast_2d::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::atleast_2d_Sequence::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::atleast_3d::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::atleast_3d_Sequence::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
- return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor batch1_value;
- optional<int64_t> batch1_bdim;
- std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
- Tensor batch2_value;
- optional<int64_t> batch2_bdim;
- std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
- auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
- return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor batch1_value;
- optional<int64_t> batch1_bdim;
- std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
- Tensor batch2_value;
- optional<int64_t> batch2_bdim;
- std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
- batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
- return self;
- }
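- // Ops taking optional tensors (weight, bias, running stats, ...) unwrap
- // each optional argument only when it is present, leaving both its value
- // and its batch dim as nullopt otherwise.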
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) {
- return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor var_value;
- optional<int64_t> var_bdim;
- std::tie(var_value, var_bdim) = unwrapTensorAtLevel(var, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
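- // Ops returning several tensors get a flattened result tuple of
- // (value, batch dim) pairs; the wrapper re-wraps each pair with makeBatched
- // and reassembles the original tuple via std::make_tuple, as in the
- // backward below.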
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) {
- return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor reservedSpace_value;
- optional<int64_t> reservedSpace_bdim;
- std::tie(reservedSpace_value, reservedSpace_bdim) = unwrapTensorAtLevel(reservedSpace, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- optional<Tensor> save_mean_value;
- optional<int64_t> save_mean_bdim;
- if (save_mean) {
- std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
- }
- optional<Tensor> save_var_transform_value;
- optional<int64_t> save_var_transform_bdim;
- if (save_var_transform) {
- std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level);
- }
- auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bernoulli::call(self, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
- return at::_ops::bernoulli__Tensor::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor p_value;
- optional<int64_t> p_bdim;
- std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
- batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bernoulli__float::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bernoulli_p::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
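- // A hedged sketch of how one of these templates might be put to use (the
- // exact functorch registration macro may differ, and sin_generated_plumbing
- // is assumed to be defined earlier in this header): instantiate the
- // template with a concrete batch rule and register it as the
- // FuncTorchBatched kernel for the op, e.g. in a .cpp file:
- //
- //   TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
- //     m.impl("sin", sin_generated_plumbing<decltype(&sin_batch_rule),
- //                                          &sin_batch_rule>);
- //   }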
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::bilinear::call(input1, input2, weight, bias);
- }
- Tensor input1_value;
- optional<int64_t> input1_bdim;
- std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
- Tensor input2_value;
- optional<int64_t> input2_bdim;
- std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(pos_weight, cur_level)) {
- return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> pos_weight_value;
- optional<int64_t> pos_weight_bdim;
- if (pos_weight) {
- std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bincount_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
- return at::_ops::bincount::call(self, weights, minlength);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weights_value;
- optional<int64_t> weights_bdim;
- if (weights) {
- std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_not::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_not_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::copysign_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::copysign__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::copysign_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::copysign__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logical_not_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logical_not::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & logical_not__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logical_not_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logical_xor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logical_xor_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logical_and::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logical_and_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logical_or::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logical_or_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::bmm::call(self, mat2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> broadcast_tensors_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::broadcast_tensors::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::broadcast_to::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_broadcast_to::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
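- // Note: cat takes an at::ITensorListRef (a generalized tensor-list
- // reference) while the concat/concatenate aliases below take at::TensorList;
- // all of them route through the same list-level plumbing.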
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::cat::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::cat_names::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::concat::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::concat_names::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::concatenate::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::concatenate_names::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor block_diag_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::block_diag::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
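- // For a unary op such as ceil, the batch rule is expected to take the
- // unwrapped value plus its optional batch dimension and return a
- // (Tensor, optional<int64_t>) pair, as the std::get<0>/std::get<1> re-wrap
- // below implies. Illustrative signature (an assumption for exposition, not
- // taken from this file):
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   ceil_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim);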
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ceil_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ceil::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
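- // In-place variants (tagged "gen_vmap_inplace_plumbing") mutate the unwrapped
- // self_value through the batch rule and return the original self; nothing is
- // re-wrapped because self already carries the batched wrapper at this level.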
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & ceil__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ceil_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(matrices, cur_level)) {
- return at::_ops::chain_matmul::call(matrices);
- }
- auto results = batch_rule(matrices);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unsafe_chunk::call(self, chunks, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, chunks, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::chunk::call(self, chunks, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, chunks, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
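- // The _sections / _indices / _tensor_indices_or_sections suffixes below
- // mirror the three tensor_split overloads in the op schema; each overload
- // gets its own plumbing function because the argument types differ.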
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tensor_split_sections::call(self, sections, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sections, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tensor_split_indices::call(self, indices, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) {
- return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor tensor_indices_or_sections_value;
- optional<int64_t> tensor_indices_or_sections_bdim;
- std::tie(tensor_indices_or_sections_value, tensor_indices_or_sections_bdim) = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level);
- auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clamp_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clamp::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, min, max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
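- // Optional tensor arguments (min/max here, bias in the convolution ops
- // further down) are unwrapped only when present; an absent optional is
- // forwarded to the batch rule as an empty value/bdim pair.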
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
- return at::_ops::clamp_Tensor::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> min_value;
- optional<int64_t> min_bdim;
- if (min) {
- std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
- }
- optional<Tensor> max_value;
- optional<int64_t> max_bdim;
- if (max) {
- std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clamp__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clamp_::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, min, max);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
- return at::_ops::clamp__Tensor::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> min_value;
- optional<int64_t> min_bdim;
- if (min) {
- std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
- }
- optional<Tensor> max_value;
- optional<int64_t> max_bdim;
- if (max) {
- std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
- }
- batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clamp_max::call(self, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
- return at::_ops::clamp_max_Tensor::call(self, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor max_value;
- optional<int64_t> max_bdim;
- std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
- auto results = batch_rule(self_value, self_bdim, max_value, max_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clamp_max_::call(self, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, max);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
- return at::_ops::clamp_max__Tensor::call(self, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor max_value;
- optional<int64_t> max_bdim;
- std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
- batch_rule(self_value, self_bdim, max_value, max_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clamp_min::call(self, min);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, min);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
- return at::_ops::clamp_min_Tensor::call(self, min);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor min_value;
- optional<int64_t> min_bdim;
- std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
- auto results = batch_rule(self_value, self_bdim, min_value, min_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clamp_min_::call(self, min);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, min);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
- return at::_ops::clamp_min__Tensor::call(self, min);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor min_value;
- optional<int64_t> min_bdim;
- std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
- batch_rule(self_value, self_bdim, min_value, min_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clip_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clip::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, min, max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
- return at::_ops::clip_Tensor::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> min_value;
- optional<int64_t> min_bdim;
- if (min) {
- std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
- }
- optional<Tensor> max_value;
- optional<int64_t> max_bdim;
- if (max) {
- std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clip__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clip_::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, min, max);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
- return at::_ops::clip__Tensor::call(self, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> min_value;
- optional<int64_t> min_bdim;
- if (min) {
- std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
- }
- optional<Tensor> max_value;
- optional<int64_t> max_bdim;
- if (max) {
- std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
- }
- batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) {
- return at::_ops::complex::call(real, imag);
- }
- Tensor real_value;
- optional<int64_t> real_bdim;
- std::tie(real_value, real_bdim) = unwrapTensorAtLevel(real, cur_level);
- Tensor imag_value;
- optional<int64_t> imag_bdim;
- std::tie(imag_value, imag_bdim) = unwrapTensorAtLevel(imag, cur_level);
- auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) {
- return at::_ops::polar::call(abs, angle);
- }
- Tensor abs_value;
- optional<int64_t> abs_bdim;
- std::tie(abs_value, abs_bdim) = unwrapTensorAtLevel(abs, cur_level);
- Tensor angle_value;
- optional<int64_t> angle_bdim;
- std::tie(angle_value, angle_bdim) = unwrapTensorAtLevel(angle, cur_level);
- auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::constant_pad_nd::call(self, pad, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, pad, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::contiguous::call(self, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
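- // Ops returning several tensors receive a flattened results tuple from the
- // batch rule: (value0, bdim0, value1, bdim1, ...). The plumbing re-wraps
- // each pair with makeBatched and reassembles the std::tuple return value.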
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_generated_plumbing(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
- }
- Tensor gO_value;
- optional<int64_t> gO_bdim;
- std::tie(gO_value, gO_bdim) = unwrapTensorAtLevel(gO, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> ggI_value;
- optional<int64_t> ggI_bdim;
- if (ggI) {
- std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level);
- }
- optional<Tensor> ggW_value;
- optional<int64_t> ggW_bdim;
- if (ggW) {
- std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level);
- }
- optional<Tensor> ggb_value;
- optional<int64_t> ggb_bdim;
- if (ggb) {
- std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level);
- }
- auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv_tbc::call(self, weight, bias, pad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor bias_value;
- optional<int64_t> bias_bdim;
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor bias_value;
- optional<int64_t> bias_bdim;
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
- auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::copy::call(self, src, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::copy_::call(self, src, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
- return at::_ops::_copy_from::call(self, dst, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor dst_value;
- optional<int64_t> dst_bdim;
- std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
- auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
- return at::_ops::_copy_from_and_resize::call(self, dst);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor dst_value;
- optional<int64_t> dst_bdim;
- std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
- auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cos_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cos::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cos__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cos_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cosh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cosh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cosh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cosh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
- }
- Tensor input1_value;
- optional<int64_t> input1_bdim;
- std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
- Tensor input2_value;
- optional<int64_t> input2_bdim;
- std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::count_nonzero_dim_IntList::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::count_nonzero::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
- return at::_ops::cov::call(self, correction, fweights, aweights);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> fweights_value;
- optional<int64_t> fweights_bdim;
- if (fweights) {
- std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level);
- }
- optional<Tensor> aweights_value;
- optional<int64_t> aweights_bdim;
- if (aweights) {
- std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::corrcoef::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(theta, cur_level)) {
- return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
- }
- Tensor theta_value;
- optional<int64_t> theta_bdim;
- std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
- auto results = batch_rule(theta_value, theta_bdim, N, C, H, W);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level)) {
- return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, N, C, H, W);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
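- // Multi-output ops: the batch_rule returns interleaved (tensor, bdim) pairs,
- // so each pair is re-wrapped with makeBatched and packed into the returned
- // ::std::tuple in order.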
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) {
- return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor reserveSpace_value;
- optional<int64_t> reserveSpace_bdim;
- std::tie(reserveSpace_value, reserveSpace_bdim) = unwrapTensorAtLevel(reserveSpace, cur_level);
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- optional<Tensor> save_mean_value;
- optional<int64_t> save_mean_bdim;
- if (save_mean) {
- std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
- }
- optional<Tensor> save_var_value;
- optional<int64_t> save_var_bdim;
- if (save_var) {
- std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor z_value;
- optional<int64_t> z_bdim;
- std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::cudnn_grid_sampler::call(self, grid);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim, grad_output_value, grad_output_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> cummax_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cummax::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cummax_dimname::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
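- // Void helpers carry the "gen_vmap_plumbing_no_returns" escape tag: the
- // batch_rule is invoked purely for its side effects on the unwrapped
- // values/indices out-arguments, and nothing is re-wrapped or returned.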
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::_cummax_helper::call(self, values, indices, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> cummin_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cummin::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cummin_dimname::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::_cummin_helper::call(self, values, indices, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumprod::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumprod_::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dim, dtype);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumprod_dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumprod__dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dim, dtype);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
- return at::_ops::cumprod_backward::call(grad, input, dim, output);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumsum::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumsum_::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dim, dtype);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumsum_dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cumsum__dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dim, dtype);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
- return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
- }
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(y, cur_level)) {
- return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
- }
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- auto results = batch_rule(y_value, y_bdim, dx, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
- return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
- }
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
- return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
- }
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- Tensor input_lengths_value;
- optional<int64_t> input_lengths_bdim;
- std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
- Tensor target_lengths_value;
- optional<int64_t> target_lengths_bdim;
- std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
- auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
- return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
- }
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
- return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
- }
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- Tensor input_lengths_value;
- optional<int64_t> input_lengths_bdim;
- std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
- Tensor target_lengths_value;
- optional<int64_t> target_lengths_bdim;
- std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
- auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _ctc_loss_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
- return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- Tensor neg_log_likelihood_value;
- optional<int64_t> neg_log_likelihood_bdim;
- std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
- Tensor log_alpha_value;
- optional<int64_t> log_alpha_bdim;
- std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
- return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor log_probs_value;
- optional<int64_t> log_probs_bdim;
- std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
- Tensor targets_value;
- optional<int64_t> targets_bdim;
- std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
- Tensor input_lengths_value;
- optional<int64_t> input_lengths_bdim;
- std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
- Tensor target_lengths_value;
- optional<int64_t> target_lengths_bdim;
- std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
- Tensor neg_log_likelihood_value;
- optional<int64_t> neg_log_likelihood_bdim;
- std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
- Tensor log_alpha_value;
- optional<int64_t> log_alpha_bdim;
- std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diag_embed_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::diag_embed::call(self, offset, dim1, dim2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::diagflat::call(self, offset);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, offset);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::diagonal::call(self, offset, dim1, dim2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
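- // In-place variants (checked via "gen_vmap_inplace_plumbing") call the batch
- // rule purely for its side effect on self_value and return the original
- // `self` reference instead of rewrapping a new output tensor.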
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fill_diagonal_::call(self, fill_value, wrap);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, fill_value, wrap);
- return self;
- }
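- // Optional tensor arguments (e.g. `prepend`/`append` below) are unwrapped
- // only when they hold a value; otherwise the empty optional value and bdim
- // are forwarded to the batch rule unchanged.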
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) {
- return at::_ops::diff::call(self, n, dim, prepend, append);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> prepend_value;
- optional<int64_t> prepend_bdim;
- if (prepend) {
- std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level);
- }
- optional<Tensor> append_value;
- optional<int64_t> append_bdim;
- if (append) {
- std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
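- // Ops returning ::std::vector<at::Tensor> (the `gradient` overloads below)
- // rewrap through makeBatchedVector, which applies the same bdim/level
- // bookkeeping as makeBatched to each tensor in the returned vector.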
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_scalarint_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gradient_array::call(self, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
- return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
- return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::div_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
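- // Illustrative only (this sketch is not part of the generated file): a batch
- // rule matching the call above would have roughly this shape, taking each
- // tensor together with its optional batch dimension and returning the result
- // plus the result's batch dimension:
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   div_tensor_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim,
- //                         const at::Tensor& other, c10::optional<int64_t> other_bdim);
- //
- // (`div_tensor_batch_rule` is a hypothetical name.) The plumbing's
- // std::get<0>/std::get<1> calls read back exactly this pair.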
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::div__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::div__Tensor_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::div_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::div__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::div__Scalar_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other, rounding_mode);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::divide_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::divide__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::divide_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::divide__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other, rounding_mode);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::true_divide_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::true_divide__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::true_divide_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::true_divide__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) {
- return at::_ops::dot::call(self, tensor);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor tensor_value;
- optional<int64_t> tensor_bdim;
- std::tie(tensor_value, tensor_bdim) = unwrapTensorAtLevel(tensor, cur_level);
- auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::vdot::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
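- // TensorList arguments (e.g. `tensors` in einsum below) are not unwrapped by
- // the plumbing; the list is passed to the batch rule as-is, so per-tensor
- // unwrapping is the batch rule's responsibility.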
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::einsum::call(equation, tensors, path);
- }
- auto results = batch_rule(equation, tensors, path);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
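- // Multi-output ops below return their batch-rule results as a flat tuple of
- // interleaved (tensor, bdim) pairs: std::get<0>/<2>/<4>/<6> are the output
- // tensors and std::get<1>/<3>/<5>/<7> their batch dims, rewrapped pairwise.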
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor row_stack_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::row_stack::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- Tensor offset2bag_value;
- optional<int64_t> offset2bag_bdim;
- std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
- Tensor bag_size_value;
- optional<int64_t> bag_size_bdim;
- std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
- Tensor maximum_indices_value;
- optional<int64_t> maximum_indices_bdim;
- std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- Tensor offset2bag_value;
- optional<int64_t> offset2bag_bdim;
- std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
- Tensor bag_size_value;
- optional<int64_t> bag_size_bdim;
- std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
- return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offset2bag_value;
- optional<int64_t> offset2bag_bdim;
- std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
- Tensor bag_size_value;
- optional<int64_t> bag_size_bdim;
- std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
- Tensor maximum_indices_value;
- optional<int64_t> maximum_indices_bdim;
- std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
- optional<Tensor> per_sample_weights_value;
- optional<int64_t> per_sample_weights_bdim;
- if (per_sample_weights) {
- std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
- }
- auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) {
- return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- Tensor offset2bag_value;
- optional<int64_t> offset2bag_bdim;
- std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
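- // Factory-style methods such as new_empty/new_full below only batch over
- // `self`; size, dtype, layout, device, and pin_memory are plain values and
- // are forwarded to the batch rule untouched.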
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor new_full_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
- return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor scales_value;
- optional<int64_t> scales_bdim;
- std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
- Tensor zero_points_value;
- optional<int64_t> zero_points_bdim;
- std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
- auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_resize_output_::call(self, size, device);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, size, device);
- return self;
- }
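- // In-place plumbing (note the "gen_vmap_inplace_plumbing" check) ignores the
- // batch rule's return value: the rule mutates the unwrapped self_value in
- // place, and the wrapper returns the original `self`, through which the
- // mutation is visible.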
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(qtensor, cur_level)) {
- return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor qtensor_value;
- optional<int64_t> qtensor_bdim;
- std::tie(qtensor_value, qtensor_bdim) = unwrapTensorAtLevel(qtensor, cur_level);
- auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor empty_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor erf_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::erf::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
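- // Illustrative sketch (an assumption, not part of the generated file): the
- // batch_rule that erf_generated_plumbing above expects has this shape. For a
- // pointwise op the rule can apply the op directly and forward the batch dim
- // unchanged. The name erf_example_batch_rule is hypothetical.
- inline std::tuple<at::Tensor, c10::optional<int64_t>> erf_example_batch_rule(
-     const at::Tensor & self, c10::optional<int64_t> self_bdim) {
-   // erf is elementwise, so the output's batch dim sits where the input's did.
-   return std::make_tuple(at::_ops::erf::call(self), self_bdim);
- }
- // Registration (also an assumption about the surrounding build): a
- // VMAP_SUPPORT-style macro instantiates the template as
- //   erf_generated_plumbing<decltype(&erf_example_batch_rule), &erf_example_batch_rule>
- // and registers it under the FuncTorchBatched dispatch key.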
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & erf__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::erf_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor erfc_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::erfc::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & erfc__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::erfc_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor exp_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::exp::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & exp__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::exp_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor exp2_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::exp2::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & exp2__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::exp2_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor expm1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::expm1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & expm1__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::expm1_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::expand::call(self, size, implicit);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, implicit);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::expand_as::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
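- // Binary ops unwrap each operand independently: self_bdim and other_bdim may
- // point at different dimensions, or be nullopt for an unbatched operand, and
- // aligning them is the batch rule's job.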
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::flatten_using_ints::call(self, start_dim, end_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, start_dim, end_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::flatten_DimnameList::call(self, dims, out_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dims, out_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unflatten_int::call(self, dim, sizes);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, sizes);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, sizes, names);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fill_Scalar::call(self, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::fill_Tensor::call(self, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- auto results = batch_rule(self_value, self_bdim, value_value, value_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fill__Scalar::call(self, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::fill__Tensor::call(self, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- batch_rule(self_value, self_bdim, value_value, value_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor floor_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::floor::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & floor__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::floor_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::floor_divide::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::floor_divide__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::floor_divide_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::floor_divide__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
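- // Scalar overloads (the two floor_divide.Scalar variants above) treat
- // `other` as plain data: a Scalar can never be batched, so only `self` is
- // unwrapped and the scalar passes straight through.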
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor frac_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::frac::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & frac__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::frac_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::gcd::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::gcd_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::lcm::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::lcm_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
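- // Multi-output ops receive a flattened tuple from the batch rule:
- // (value0, bdim0, value1, bdim1, ...). Hence std::get<0>/<1> re-wrap
- // grad_input and std::get<2>/<3> re-wrap grad_grid here.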
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
- return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grid_value;
- optional<int64_t> grid_bdim;
- std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
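- // Optional tensor arguments (weight, bias) are unwrapped only when present;
- // an absent optional reaches the batch rule as nullopt for both the value
- // and the bdim.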
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor rstd_value;
- optional<int64_t> rstd_bdim;
- std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, normalization, forward);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) {
- return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
- }
- Tensor compressed_idx_value;
- optional<int64_t> compressed_idx_bdim;
- std::tie(compressed_idx_value, compressed_idx_bdim) = unwrapTensorAtLevel(compressed_idx, cur_level);
- Tensor plain_idx_value;
- optional<int64_t> plain_idx_bdim;
- std::tie(plain_idx_value, plain_idx_bdim) = unwrapTensorAtLevel(plain_idx, cur_level);
- batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz);
- }
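- // Void ops (checked as "gen_vmap_plumbing_no_returns") run the batch rule
- // purely for its validation side effects; with nothing to re-wrap, the
- // wrapper just falls off the end.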
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::index_Tensor::call(self, indices);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
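- // The c10::List<c10::optional<Tensor>> `indices` is tested for batching but
- // forwarded still wrapped; presumably the batch rule unwraps list elements
- // itself (the index_put_/index_put wrappers below do the same).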
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_copy_::call(self, dim, index, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_copy::call(self, dim, index, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_copy__dimname::call(self, dim, index, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_copy_dimname::call(self, dim, index, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::index_put_::call(self, indices, values, accumulate);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::index_put::call(self, indices, values, accumulate);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
- return self;
- }
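- // Ops with optional tensor arguments (weight, bias, running stats below) only
- // unwrap an argument when it is present; the batch rule receives matching
- // optional<Tensor> / optional<int64_t> pairs that remain nullopt otherwise.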
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) {
- return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert);
- }
- Tensor elements_value;
- optional<int64_t> elements_bdim;
- std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
- Tensor test_elements_value;
- optional<int64_t> test_elements_bdim;
- std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
- auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
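- // Scalar arguments can never carry a batch dimension, so they skip the
- // unwrapping step entirely and are passed straight through to the batch rule
- // (and to the unbatched fast path).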
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(elements, cur_level)) {
- return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert);
- }
- Tensor elements_value;
- optional<int64_t> elements_bdim;
- std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
- auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(test_elements, cur_level)) {
- return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert);
- }
- Tensor test_elements_value;
- optional<int64_t> test_elements_bdim;
- std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
- auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isnan_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::isnan::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isreal_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::isreal::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::kl_div::call(self, target, reduction, log_target);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::kron::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
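- // Multi-output ops: the batch rule returns interleaved (tensor, bdim) pairs,
- // so std::get<0>/<1> re-wrap the first output, std::get<2>/<3> the second, and
- // so on, packed back into the tuple the op is declared to return.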
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> kthvalue_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::kthvalue::call(self, k, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname_generated_plumbing(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor rstd_value;
- optional<int64_t> rstd_bdim;
- std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nan_to_num::call(self, nan, posinf, neginf);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nan_to_num_::call(self, nan, posinf, neginf);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, nan, posinf, neginf);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::linear::call(input, weight, bias);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::linear_backward::call(self, grad_output, weight, output_mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::mkldnn_linear::call(self, weight, bias);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor packed_value;
- optional<int64_t> packed_bdim;
- std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
- Tensor col_offsets_value;
- optional<int64_t> col_offsets_bdim;
- std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
- Tensor bias_value;
- optional<int64_t> bias_bdim;
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor packed_value;
- optional<int64_t> packed_bdim;
- std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
- Tensor col_offsets_value;
- optional<int64_t> col_offsets_bdim;
- std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
- Tensor bias_value;
- optional<int64_t> bias_bdim;
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor packed_weight_value;
- optional<int64_t> packed_weight_bdim;
- std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
- Tensor bias_value;
- optional<int64_t> bias_bdim;
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
- auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor packed_weight_value;
- optional<int64_t> packed_weight_bdim;
- std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
- Tensor bias_value;
- optional<int64_t> bias_bdim;
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
- auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::fbgemm_pack_quantized_matrix::call(input);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, K, N);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::ldexp_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::ldexp_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & log__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log10_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log10::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & log10__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log10_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log1p_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log1p::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & log1p__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log1p_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log2_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log2::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & log2__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log2_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logaddexp::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::logaddexp2::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::xlogy_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::xlogy_Scalar_Self::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::xlogy_Scalar_Other::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::xlogy__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::xlogy__Scalar_Other::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_softmax_int::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_softmax_Dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_log_softmax::call(self, dim, half_to_float);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
- return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_logcumsumexp::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logcumsumexp::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logcumsumexp_dimname::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logsumexp::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logsumexp_names::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction);
- }
- Tensor input1_value;
- optional<int64_t> input1_bdim;
- std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
- Tensor input2_value;
- optional<int64_t> input2_bdim;
- std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::matmul::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
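- // Ops with multiple tensor outputs (matmul_backward below, and the *_dim
- // reductions later in this file) use the same scheme: the batch rule returns
- // a flattened tuple of (Tensor, optional<int64_t>) pairs, so N outputs occupy
- // 2N tuple slots and are re-wrapped pairwise with makeBatched.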
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> matmul_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::matmul_backward::call(grad, self, other, mask);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::matrix_power::call(self, n);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::matrix_exp::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
- return at::_ops::matrix_exp_backward::call(self, grad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _aminmax_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_aminmax::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_aminmax_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> aminmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::aminmax::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) {
- return at::_ops::_compute_linear_combination::call(input, coefficients);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor coefficients_value;
- optional<int64_t> coefficients_bdim;
- std::tie(coefficients_value, coefficients_bdim) = unwrapTensorAtLevel(coefficients, cur_level);
- auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_names_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::amax::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
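- // The pooling plumbings below are near-identical: all take (kernel_size,
- // stride, padding, dilation, ceil_mode) and differ only in the redispatch
- // target and in whether an indices tensor is returned alongside the output.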
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
- return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
- return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mean_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mean::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mean_dim::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nanmean::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor median_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::median::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::median_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::median_names_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nanmedian::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nanmedian_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nanmedian_names_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::min_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::min_names_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::amin::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
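- // From here on, several ops take c10::optional<at::Tensor> arguments (e.g.
- // bias, running stats). Those are unwrapped only when present; otherwise the
- // batch rule receives an empty optional<Tensor> with an empty optional bdim.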
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) {
- return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight0_value;
- optional<int64_t> weight0_bdim;
- std::tie(weight0_value, weight0_bdim) = unwrapTensorAtLevel(weight0, cur_level);
- Tensor weight1_value;
- optional<int64_t> weight1_bdim;
- std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
- Tensor weight2_value;
- optional<int64_t> weight2_bdim;
- std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
- Tensor weight3_value;
- optional<int64_t> weight3_bdim;
- std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
- Tensor hx__value;
- optional<int64_t> hx__bdim;
- std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
- Tensor cx__value;
- optional<int64_t> cx__bdim;
- std::tie(cx__value, cx__bdim) = unwrapTensorAtLevel(cx_, cur_level);
- auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
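- // mkldnn_rnn_layer_backward below shows the template at full scale. Note the
- // ordering: all required tensors (including the trailing workspace argument)
- // are unwrapped first, then the optional gradients; the batch rule itself is
- // still called with arguments in declaration order.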
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
- return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight1_value;
- optional<int64_t> weight1_bdim;
- std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
- Tensor weight2_value;
- optional<int64_t> weight2_bdim;
- std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
- Tensor weight3_value;
- optional<int64_t> weight3_bdim;
- std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
- Tensor weight4_value;
- optional<int64_t> weight4_bdim;
- std::tie(weight4_value, weight4_bdim) = unwrapTensorAtLevel(weight4, cur_level);
- Tensor hx__value;
- optional<int64_t> hx__bdim;
- std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
- Tensor cx_tmp_value;
- optional<int64_t> cx_tmp_bdim;
- std::tie(cx_tmp_value, cx_tmp_bdim) = unwrapTensorAtLevel(cx_tmp, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor hy__value;
- optional<int64_t> hy__bdim;
- std::tie(hy__value, hy__bdim) = unwrapTensorAtLevel(hy_, cur_level);
- Tensor cy__value;
- optional<int64_t> cy__bdim;
- std::tie(cy__value, cy__bdim) = unwrapTensorAtLevel(cy_, cur_level);
- Tensor workspace_value;
- optional<int64_t> workspace_bdim;
- std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
- optional<Tensor> grad_output_value;
- optional<int64_t> grad_output_bdim;
- if (grad_output) {
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
- }
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) {
- return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- optional<Tensor> save_mean_value;
- optional<int64_t> save_mean_bdim;
- if (save_mean) {
- std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
- }
- optional<Tensor> save_var_value;
- optional<int64_t> save_var_bdim;
- if (save_var) {
- std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
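- // Optional tensor arguments (c10::optional<at::Tensor>, e.g. `bias` above) are
- // only unwrapped when they hold a value; otherwise both the value and its
- // batch dim remain nullopt and are forwarded to the batch rule as-is.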
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor z_value;
- optional<int64_t> z_bdim;
- std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
- return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- optional<Tensor> cx_value;
- optional<int64_t> cx_bdim;
- if (cx) {
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
- }
- optional<Tensor> dropout_state_value;
- optional<int64_t> dropout_state_bdim;
- if (dropout_state) {
- std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
- return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_buf_value;
- optional<int64_t> weight_buf_bdim;
- std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor reserve_value;
- optional<int64_t> reserve_bdim;
- std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
- optional<Tensor> cx_value;
- optional<int64_t> cx_bdim;
- if (cx) {
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
- }
- optional<Tensor> grad_output_value;
- optional<int64_t> grad_output_bdim;
- if (grad_output) {
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
- }
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- optional<Tensor> dropout_state_value;
- optional<int64_t> dropout_state_bdim;
- if (dropout_state) {
- std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
- }
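- // TensorList arguments (e.g. `weight` above) are forwarded to the batch rule
- // unchanged, and a ::std::vector<at::Tensor> output is re-wrapped element-wise
- // via makeBatchedVector rather than makeBatched.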
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::mm::call(self, mat2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
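- // Illustrative sketch (not part of the generated file): a concrete kernel is
- // produced by instantiating a template like the one above with a batch rule.
- // Assuming a hypothetical rule with the matching signature
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>> mm_batch_rule(
- //       const at::Tensor& self, c10::optional<int64_t> self_bdim,
- //       const at::Tensor& mat2, c10::optional<int64_t> mat2_bdim);
- //
- // registration would look roughly like
- //
- //   m.impl("mm", mm_generated_plumbing<decltype(&mm_batch_rule), &mm_batch_rule>);
- //
- // (functorch's VMAP_SUPPORT macro expands to approximately this form).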
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
- return at::_ops::_sparse_mm::call(sparse, dense);
- }
- Tensor sparse_value;
- optional<int64_t> sparse_bdim;
- std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
- Tensor dense_value;
- optional<int64_t> dense_bdim;
- std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
- auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
- return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
- }
- Tensor sparse_value;
- optional<int64_t> sparse_bdim;
- std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
- Tensor dense_value;
- optional<int64_t> dense_bdim;
- std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
- auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_sparse_sparse_matmul::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mode::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mode_dimname::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::mul_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::mul__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
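- // In-place variants (trailing underscore, checked via "gen_vmap_inplace_plumbing")
- // mutate the unwrapped self_value through the batch rule and return the
- // original `self` reference directly instead of re-wrapping a result.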
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mul_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mul__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::multiply_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::multiply__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::multiply_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::multiply__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
- return at::_ops::mv::call(self, vec);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor vec_value;
- optional<int64_t> vec_bdim;
- std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
- auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mvlgamma::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mvlgamma_::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::narrow_copy::call(self, dim, start, length);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, start, length);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::narrow::call(self, dim, start, length);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, start, length);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) {
- return at::_ops::narrow_Tensor::call(self, dim, start, length);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor start_value;
- optional<int64_t> start_bdim;
- std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::batch_norm_stats::call(input, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, eps);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) {
- return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor invstd_value;
- optional<int64_t> invstd_bdim;
- std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor invstd_value;
- optional<int64_t> invstd_bdim;
- std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) {
- return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor invstd_value;
- optional<int64_t> invstd_bdim;
- std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
- Tensor counts_value;
- optional<int64_t> counts_bdim;
- std::tie(counts_value, counts_bdim) = unwrapTensorAtLevel(counts, cur_level);
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) {
- return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- optional<Tensor> save_mean_value;
- optional<int64_t> save_mean_bdim;
- if (save_mean) {
- std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
- }
- optional<Tensor> save_invstd_value;
- optional<int64_t> save_invstd_bdim;
- if (save_invstd) {
- std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level);
- }
- auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor invstd_value;
- optional<int64_t> invstd_bdim;
- std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mean_dy, cur_level) && !isBatchedAtLevel(mean_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) {
- return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor invstd_value;
- optional<int64_t> invstd_bdim;
- std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
- Tensor mean_dy_value;
- optional<int64_t> mean_dy_bdim;
- std::tie(mean_dy_value, mean_dy_bdim) = unwrapTensorAtLevel(mean_dy, cur_level);
- Tensor mean_dy_xmu_value;
- optional<int64_t> mean_dy_xmu_bdim;
- std::tie(mean_dy_xmu_value, mean_dy_xmu_bdim) = unwrapTensorAtLevel(mean_dy_xmu, cur_level);
- Tensor count_value;
- optional<int64_t> count_bdim;
- std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, mean_dy_value, mean_dy_bdim, mean_dy_xmu_value, mean_dy_xmu_bdim, count_value, count_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> running_mean_value;
- optional<int64_t> running_mean_bdim;
- if (running_mean) {
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
- }
- optional<Tensor> running_var_value;
- optional<int64_t> running_var_bdim;
- if (running_var) {
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ones_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
- return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
- }
- Tensor x1_value;
- optional<int64_t> x1_bdim;
- std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
- Tensor x2_value;
- optional<int64_t> x2_bdim;
- std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
- auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
- return at::_ops::cdist::call(x1, x2, p, compute_mode);
- }
- Tensor x1_value;
- optional<int64_t> x1_bdim;
- std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
- Tensor x2_value;
- optional<int64_t> x2_bdim;
- std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
- auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
- return at::_ops::_euclidean_dist::call(x1, x2);
- }
- Tensor x1_value;
- optional<int64_t> x1_bdim;
- std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
- Tensor x2_value;
- optional<int64_t> x2_bdim;
- std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
- auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
- return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
- }
- Tensor x1_value;
- optional<int64_t> x1_bdim;
- std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
- Tensor x2_value;
- optional<int64_t> x2_bdim;
- std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
- auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) {
- return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor x1_value;
- optional<int64_t> x1_bdim;
- std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
- Tensor x2_value;
- optional<int64_t> x2_bdim;
- std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
- Tensor cdist_value;
- optional<int64_t> cdist_bdim;
- std::tie(cdist_value, cdist_bdim) = unwrapTensorAtLevel(cdist, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
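- // Note the fast path above only fires when *none* of the inputs is batched at
- // the current level (the isBatchedAtLevel checks are chained with &&); a single
- // batched argument is enough to route the call through the batch rule.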
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pdist::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_pdist_forward::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) {
- return at::_ops::_pdist_backward::call(grad, self, p, pdist);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor pdist_value;
- optional<int64_t> pdist_bdim;
- std::tie(pdist_value, pdist_bdim) = unwrapTensorAtLevel(pdist, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
- return at::_ops::cosine_similarity::call(x1, x2, dim, eps);
- }
- Tensor x1_value;
- optional<int64_t> x1_bdim;
- std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
- Tensor x2_value;
- optional<int64_t> x2_bdim;
- std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
- auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::permute::call(self, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::movedim_intlist::call(self, source, destination);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, source, destination);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::movedim_int::call(self, source, destination);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, source, destination);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::moveaxis_intlist::call(self, source, destination);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, source, destination);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::moveaxis_int::call(self, source, destination);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, source, destination);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::numpy_T::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::matrix_H::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mT_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mT::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mH_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mH::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor adjoint_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adjoint::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pixel_shuffle::call(self, upscale_factor);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, upscale_factor);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pixel_unshuffle::call(self, downscale_factor);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, downscale_factor);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::channel_shuffle::call(self, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::native_channel_shuffle::call(self, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pin_memory::call(self, device);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, device);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_pin_memory::call(self, device);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, device);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pinverse::call(self, rcond);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, rcond);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rad2deg::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
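- // In-place ops use a slightly different template ("gen_vmap_inplace_plumbing"):
- // the batch rule mutates the unwrapped self_value directly, so nothing is
- // rewrapped and the original `self` reference is returned as-is.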
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rad2deg_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::deg2rad::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::deg2rad_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rand_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
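- // *_like factory ops thread their TensorOptions fields (dtype, layout, device,
- // pin_memory, memory_format) through to the batch rule unchanged; only the
- // tensor argument participates in unwrapping.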
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor randint_like_generated_plumbing(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor randn_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ravel_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ravel::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reciprocal::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reciprocal_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor neg_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::neg::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & neg__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::neg_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor negative_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::negative::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & negative__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::negative_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::repeat::call(self, repeats);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, repeats);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, c10::optional<int64_t> output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(repeats, cur_level)) {
- return at::_ops::repeat_interleave_Tensor::call(repeats, output_size);
- }
- Tensor repeats_value;
- optional<int64_t> repeats_bdim;
- std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
- auto results = batch_rule(repeats_value, repeats_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) {
- return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor repeats_value;
- optional<int64_t> repeats_bdim;
- std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
- auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reshape::call(self, shape);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, shape);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
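- // Symbolic sizes (c10::SymInt / c10::SymIntArrayRef) are opaque to the
- // plumbing: they are forwarded to the batch rule verbatim, exactly like plain
- // integer arguments.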
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_reshape_copy::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_reshape_alias::call(self, size, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, stride);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_mkldnn_reshape::call(self, shape);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, shape);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::reshape_as::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor round_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::round::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & round__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::round_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::round_decimals::call(self, decimals);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, decimals);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::round__decimals::call(self, decimals);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, decimals);
- return self;
- }
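- // Ops taking a c10::optional<at::Generator> forward it untouched; how (or
- // whether) randomness is replicated across the batch dimension is left
- // entirely to the underlying batch rule.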
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rrelu::call(self, lower, upper, training, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rrelu_::call(self, lower, upper, training, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, lower, upper, training, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor relu_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::relu::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & relu__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::relu_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor relu6_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::relu6::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & relu6__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::relu6_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::prelu::call(self, weight);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_prelu_kernel::call(self, weight);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
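- // Ops returning multiple tensors expect the batch rule to hand back the pairs
- // interleaved as (value0, bdim0, value1, bdim1, ...); the plumbing rewraps each
- // pair with makeBatched and reassembles the std::tuple.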
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gelu_::call(self, approximate);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, approximate);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gelu::call(self, approximate);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, approximate);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gelu_backward::call(grad_output, self, approximate);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardshrink::call(self, lambd);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, lambd);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardshrink_backward::call(grad_out, self, lambd);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rsqrt::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rsqrt_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::select_Dimname::call(self, dim, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::select_int::call(self, dim, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
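
For multi-input ops such as `_nested_select_backward`, the fast path fires only when no argument is batched at `cur_level`: the guard `!isBatchedAtLevel(grad_output, l) && !isBatchedAtLevel(self, l)` is, by De Morgan, the same as "not (any input batched)". Once any input is batched, every tensor is unwrapped; those not batched at this level simply come back with a `nullopt` bdim, which the rule must handle. A trivial standalone check of the guard's equivalence:

#include <cassert>

int main() {
  for (bool a : {false, true}) {
    for (bool b : {false, true}) {
      // "all unbatched" == "not any batched"
      assert((!a && !b) == !(a || b));
    }
  }
}
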
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor selu_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::selu::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & selu__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::selu_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::celu::call(self, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::celu_::call(self, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor silu_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::silu::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & silu__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::silu_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::silu_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mish_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mish::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & mish__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mish_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mish_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sigmoid::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sigmoid_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logit::call(self, eps);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, eps);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & logit__generated_plumbing(at::Tensor & self, c10::optional<double> eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logit_::call(self, eps);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, eps);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sin_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sin::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sin__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sin_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sinc_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sinc::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sinc__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sinc_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sinh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sinh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sinh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sinh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor detach_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::detach::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::slice_Tensor::call(self, dim, start, end, step);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::select_scatter::call(self, src, dim, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::smm::call(self, mat2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::softmax_int::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::softmax_Dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_softmax::call(self, dim, half_to_float);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
- return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_size, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
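
Ops that return `::std::vector<at::Tensor>` (the `split` family here) swap `makeBatched` for `makeBatchedVector`: the rule yields a `(vector, bdim)` pair and every element is re-wrapped at the same level. A hypothetical standalone sketch of that helper's shape; `Wrapped` and `make_batched_vector` are invented names:

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct Wrapped { int v; std::optional<int64_t> bdim; };

// Re-wrap every element of a returned vector with the same bdim.
std::vector<Wrapped> make_batched_vector(const std::vector<int>& vals,
                                         std::optional<int64_t> bdim) {
  std::vector<Wrapped> out;
  out.reserve(vals.size());
  for (int v : vals) {
    out.push_back(Wrapped{v, bdim});  // one wrap per split piece
  }
  return out;
}

int main() {
  auto out = make_batched_vector({1, 2, 3}, 0);
  assert(out.size() == 3 && out[1].bdim == 0);
}
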
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::split_Tensor::call(self, split_size, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_size, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::split_sizes::call(self, split_size, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_size, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::split_with_sizes::call(self, split_sizes, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hsplit_int::call(self, sections);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sections);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hsplit_array::call(self, indices);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::vsplit_int::call(self, sections);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sections);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::vsplit_array::call(self, indices);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::dsplit_int::call(self, sections);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sections);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::dsplit_array::call(self, indices);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze_dim::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze_dimname::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze_dims::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::stack::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
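
`stack` shows the `at::TensorList` pattern: unlike the single-tensor plumbing, nothing is unwrapped here; the list is forwarded as-is and per-element unwrapping is delegated to the batch rule itself. A toy standalone equivalent of that delegation (`sum_rule` and `plumb_list` are hypothetical):

#include <cassert>
#include <numeric>
#include <vector>

// The rule receives the raw list and walks it itself.
int sum_rule(const std::vector<int>& xs) {
  return std::accumulate(xs.begin(), xs.end(), 0);
}

template <typename rule_t, rule_t rule>
int plumb_list(const std::vector<int>& xs) {
  return rule(xs);  // no per-element handling at the plumbing layer
}

int main() {
  std::vector<int> xs{1, 2, 3};
  assert((plumb_list<decltype(&sum_rule), &sum_rule>(xs)) == 6);
}
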
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::_stack::call(tensors, dim);
- }
- auto results = batch_rule(tensors, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hstack_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::hstack::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor vstack_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::vstack::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor dstack_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::dstack::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
- return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> window_value;
- optional<int64_t> window_bdim;
- if (window) {
- std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
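
`stft` introduces the `c10::optional<at::Tensor>` pattern: the optional `window` argument is unwrapped only when engaged; otherwise the rule receives `(nullopt, nullopt)` for both the value and the bdim. A standalone sketch of that conditional unwrap, with `unwrap_toy` again standing in for `unwrapTensorAtLevel`:

#include <cassert>
#include <cstdint>
#include <optional>
#include <tuple>

std::tuple<int, std::optional<int64_t>> unwrap_toy(int x) {
  return {x, 0};
}

int main() {
  std::optional<int> window = 7;
  std::optional<int> window_value;
  std::optional<int64_t> window_bdim;
  if (window) {  // unwrap only when the optional is engaged
    std::tie(window_value, window_bdim) = unwrap_toy(window.value());
  }
  assert(window_value == 7 && window_bdim == 0);

  window.reset();
  std::optional<int> wv2;
  std::optional<int64_t> wb2;
  if (window) {
    std::tie(wv2, wb2) = unwrap_toy(window.value());
  }
  assert(!wv2 && !wb2);  // branch not taken: both stay nullopt
}
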
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
- return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> window_value;
- optional<int64_t> window_bdim;
- if (window) {
- std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
- return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> window_value;
- optional<int64_t> window_bdim;
- if (window) {
- std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sum_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sum::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nansum::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sum_to_size::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sqrt_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sqrt::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sqrt__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sqrt_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor square_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::square::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & square__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::square_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std::call(self, unbiased);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, unbiased);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_correction::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_mean::call(self, unbiased);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, unbiased);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
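
`std_mean` is the two-output case: the rule returns a flat four-tuple `(out0, bdim0, out1, bdim1)` and the plumbing zips it back into a `std::tuple<Tensor, Tensor>` by calling `makeBatched` on each `(value, bdim)` pair. A standalone sketch of that pairwise re-wrap; `W` and `make_batched` are stand-in names:

#include <cassert>
#include <cstdint>
#include <optional>
#include <tuple>

struct W { int v; std::optional<int64_t> bdim; };

W make_batched(int v, std::optional<int64_t> b) { return W{v, b}; }

int main() {
  // Flat four-tuple, as produced by a std_mean-style rule.
  auto results = std::make_tuple(10, std::optional<int64_t>{0},
                                 20, std::optional<int64_t>{0});
  // Re-wrap pairwise: (get<0>, get<1>) and (get<2>, get<3>).
  auto out = std::make_tuple(
      make_batched(std::get<0>(results), std::get<1>(results)),
      make_batched(std::get<2>(results), std::get<3>(results)));
  assert(std::get<0>(out).v == 10 && std::get<1>(out).v == 20);
}
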
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor prod_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::prod::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor t_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::t::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor tan_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tan::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & tan__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tan_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
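- // Note: in-place variants (suffix "_") call the batch rule only for its side
- // effect on self_value, the unwrapped tensor backing the batched wrapper,
- // and then return the original self so the in-place aliasing contract is
- // preserved for the caller.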
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor tanh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tanh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & tanh__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tanh_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::tensordot::call(self, other, dims_self, dims_other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
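- // Note: with multiple tensor arguments, the unbatched fast path requires
- // every tensor to be unbatched at cur_level (hence the chained &&); each
- // argument is otherwise unwrapped into its own (value, bdim) pair before
- // being forwarded to the batch rule.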
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::threshold::call(self, threshold, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, threshold, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::threshold_::call(self, threshold, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, threshold, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::threshold_backward::call(grad_output, self, threshold);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor tile_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tile::call(self, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::transpose_int::call(self, dim0, dim1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim0, dim1);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::transpose_Dimname::call(self, dim0, dim1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim0, dim1);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim0, dim1);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dim0, dim1);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::one_hot::call(self, num_classes);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, num_classes);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::flip::call(self, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fliplr_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fliplr::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flipud_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::flipud::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor roll_generated_plumbing(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::roll::call(self, shifts, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, shifts, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rot90::call(self, k, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, k, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
- return at::_ops::trapezoid_x::call(y, x, dim);
- }
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(y, cur_level)) {
- return at::_ops::trapezoid_dx::call(y, dx, dim);
- }
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- auto results = batch_rule(y_value, y_bdim, dx, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
- return at::_ops::trapz_x::call(y, x, dim);
- }
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(y, cur_level)) {
- return at::_ops::trapz_dx::call(y, dx, dim);
- }
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- auto results = batch_rule(y_value, y_bdim, dx, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) {
- return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads);
- }
- Tensor qkv_value;
- optional<int64_t> qkv_bdim;
- std::tie(qkv_value, qkv_bdim) = unwrapTensorAtLevel(qkv, cur_level);
- Tensor qkv_bias_value;
- optional<int64_t> qkv_bias_bdim;
- std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
- auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
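- // Note: three-output ops extend the same interleaving; values sit at the
- // even indices (0, 2, 4) of `results` and their batch dims at the odd
- // indices (1, 3, 5).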
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check);
- }
- Tensor t_value;
- optional<int64_t> t_bdim;
- std::tie(t_value, t_bdim) = unwrapTensorAtLevel(t, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(t_value, t_bdim, mask_value, mask_bdim, mask_check);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_from_padded_generated_plumbing(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) {
- return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
- }
- Tensor padded_value;
- optional<int64_t> padded_bdim;
- std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
- Tensor cpu_nested_shape_example_value;
- optional<int64_t> cpu_nested_shape_example_bdim;
- std::tie(cpu_nested_shape_example_value, cpu_nested_shape_example_bdim) = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level);
- auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_nested_tensor_size::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_nested_tensor_strides::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) {
- return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
- }
- Tensor padded_value;
- optional<int64_t> padded_bdim;
- std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
- Tensor nt_example_value;
- optional<int64_t> nt_example_bdim;
- std::tie(nt_example_value, nt_example_bdim) = unwrapTensorAtLevel(nt_example, cur_level);
- auto results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level)) {
- return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor nested_size_value;
- optional<int64_t> nested_size_bdim;
- std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
- Tensor nested_strides_value;
- optional<int64_t> nested_strides_bdim;
- std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
- auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level)) {
- return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor nested_size_value;
- optional<int64_t> nested_size_bdim;
- std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
- Tensor nested_strides_value;
- optional<int64_t> nested_strides_bdim;
- std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
- auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) {
- return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
- }
- Tensor i1_value;
- optional<int64_t> i1_bdim;
- std::tie(i1_value, i1_bdim) = unwrapTensorAtLevel(i1, cur_level);
- Tensor i2_value;
- optional<int64_t> i2_bdim;
- std::tie(i2_value, i2_bdim) = unwrapTensorAtLevel(i2, cur_level);
- Tensor i3_value;
- optional<int64_t> i3_bdim;
- std::tie(i3_value, i3_bdim) = unwrapTensorAtLevel(i3, cur_level);
- auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) {
- return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction);
- }
- Tensor anchor_value;
- optional<int64_t> anchor_bdim;
- std::tie(anchor_value, anchor_bdim) = unwrapTensorAtLevel(anchor, cur_level);
- Tensor positive_value;
- optional<int64_t> positive_bdim;
- std::tie(positive_value, positive_bdim) = unwrapTensorAtLevel(positive, cur_level);
- Tensor negative_value;
- optional<int64_t> negative_bdim;
- std::tie(negative_value, negative_bdim) = unwrapTensorAtLevel(negative, cur_level);
- auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trunc_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::trunc::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & trunc__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::trunc_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fix_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fix::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & fix__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fix_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::type_as::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_unique::call(self, sorted, return_inverse);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sorted, return_inverse);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_unsafe_view::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unsqueeze::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::vander::call(x, N, increasing);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, N, increasing);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var::call(self, unbiased);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, unbiased);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_correction::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_mean::call(self, unbiased);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, unbiased);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::view_as::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::where_self::call(condition, self, other);
- }
- Tensor condition_value;
- optional<int64_t> condition_bdim;
- std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::where_ScalarSelf::call(condition, self, other);
- }
- Tensor condition_value;
- optional<int64_t> condition_bdim;
- std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::where_ScalarOther::call(condition, self, other);
- }
- Tensor condition_value;
- optional<int64_t> condition_bdim;
- std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(condition, cur_level)) {
- return at::_ops::where_Scalar::call(condition, self, other);
- }
- Tensor condition_value;
- optional<int64_t> condition_bdim;
- std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
- auto results = batch_rule(condition_value, condition_bdim, self, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> where_generated_plumbing(const at::Tensor & condition) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(condition, cur_level)) {
- return at::_ops::where::call(condition);
- }
- Tensor condition_value;
- optional<int64_t> condition_bdim;
- std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
- auto results = batch_rule(condition_value, condition_bdim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
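- // Ops whose schema returns a TensorList (here: where(Tensor) -> Tensor[])
- // re-wrap with makeBatchedVector rather than makeBatched, which presumably
- // applies the same (value, bdim, level) wrapping element-wise to the vector
- // returned by the batch rule.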
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(v, cur_level)) {
- return at::_ops::norm_except_dim::call(v, pow, dim);
- }
- Tensor v_value;
- optional<int64_t> v_bdim;
- std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
- auto results = batch_rule(v_value, v_bdim, pow, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
- return at::_ops::_weight_norm::call(v, g, dim);
- }
- Tensor v_value;
- optional<int64_t> v_bdim;
- std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
- Tensor g_value;
- optional<int64_t> g_bdim;
- std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
- auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
- return at::_ops::_weight_norm_interface::call(v, g, dim);
- }
- Tensor v_value;
- optional<int64_t> v_bdim;
- std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
- Tensor g_value;
- optional<int64_t> g_bdim;
- std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
- auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
- return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
- }
- Tensor grad_w_value;
- optional<int64_t> grad_w_bdim;
- std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
- Tensor saved_v_value;
- optional<int64_t> saved_v_bdim;
- std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
- Tensor saved_g_value;
- optional<int64_t> saved_g_bdim;
- std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
- Tensor saved_norms_value;
- optional<int64_t> saved_norms_bdim;
- std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
- auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
- return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
- }
- Tensor grad_w_value;
- optional<int64_t> grad_w_bdim;
- std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
- Tensor saved_v_value;
- optional<int64_t> saved_v_bdim;
- std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
- Tensor saved_g_value;
- optional<int64_t> saved_g_bdim;
- std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
- Tensor saved_norms_value;
- optional<int64_t> saved_norms_bdim;
- std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
- auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, cur_level)) {
- return at::_ops::_standard_gamma_grad::call(self, output);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_value, output_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_standard_gamma::call(self, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) {
- return at::_ops::_dirichlet_grad::call(x, alpha, total);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor alpha_value;
- optional<int64_t> alpha_bdim;
- std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha, cur_level);
- Tensor total_value;
- optional<int64_t> total_bdim;
- std::tie(total_value, total_bdim) = unwrapTensorAtLevel(total, cur_level);
- auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sample_dirichlet::call(self, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor poisson_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::poisson::call(self, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) {
- return at::_ops::binomial::call(count, prob, generator);
- }
- Tensor count_value;
- optional<int64_t> count_bdim;
- std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
- Tensor prob_value;
- optional<int64_t> prob_bdim;
- std::tie(prob_value, prob_bdim) = unwrapTensorAtLevel(prob, cur_level);
- auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
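- // Note: for the sampling ops above (_standard_gamma, _sample_dirichlet,
- // poisson, binomial) the c10::optional<at::Generator> is just another
- // non-tensor argument: the plumbing forwards it to the batch rule untouched,
- // so any per-batch randomness handling is left entirely to the rule itself.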
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::native_norm::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_sum::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_sum_dtype::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_sum_dim::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_sum_backward::call(grad, self, dim);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
- return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
- }
- Tensor diagonals_value;
- optional<int64_t> diagonals_bdim;
- std::tie(diagonals_value, diagonals_bdim) = unwrapTensorAtLevel(diagonals, cur_level);
- Tensor offsets_value;
- optional<int64_t> offsets_bdim;
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
- auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::norm_Scalar::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> frexp_Tensor_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::frexp_Tensor::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nuclear_norm::call(self, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor clone_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::clone::call(self, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
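- // For reference, a batch rule compatible with the plumbing above would have
- // roughly this shape (a sketch only; `clone_batch_rule` is a hypothetical
- // name, not something defined in this header):
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   clone_batch_rule(const at::Tensor & self, c10::optional<int64_t> self_bdim,
- //                    c10::optional<at::MemoryFormat> memory_format);
- //
- // The template is then instantiated with the rule's type and address, e.g.
- //   clone_generated_plumbing<decltype(&clone_batch_rule), &clone_batch_rule>
- // so that `batch_rule(...)` in the body resolves to that function.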
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor positive_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::positive::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
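- // The in-place variants below (resize_as_, zero_, sub_, ...) use the
- // "gen_vmap_inplace_plumbing" escape tag instead of "gen_vmap_plumbing":
- // the batch rule is invoked purely for its side effect on the unwrapped
- // self_value, and the original `self` reference is returned instead of a
- // freshly wrapped result.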
- template <typename batch_rule_t, batch_rule_t batch_rule>
- const at::Tensor & resize_as__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
- return at::_ops::resize_as_::call(self, the_template, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor the_template_value;
- optional<int64_t> the_template_bdim;
- std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
- batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
- return at::_ops::resize_as_sparse_::call(self, the_template);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor the_template_value;
- optional<int64_t> the_template_bdim;
- std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
- batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & zero__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::zero_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::sub_Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::sub__Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sub_Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sub__Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::subtract_Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::subtract__Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::subtract_Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::subtract__Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::rsub_Tensor::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::heaviside::call(self, values);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(self_value, self_bdim, values_value, values_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::heaviside_::call(self, values);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(self_value, self_bdim, values_value, values_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::rsub_Scalar::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
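- // Ops with multiple tensor outputs receive one (value, bdim) pair per output
- // in the batch rule's result tuple: std::get<0>/<1> for the first output,
- // std::get<2>/<3> for the second, each re-wrapped with makeBatched.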
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) {
- return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- Tensor arg_out_value;
- optional<int64_t> arg_out_bdim;
- std::tie(arg_out_value, arg_out_bdim) = unwrapTensorAtLevel(arg_out, cur_level);
- auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
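- // Factory-style constructors (the sparse_*_tensor family below) unwrap only
- // their tensor arguments; non-tensor arguments such as size, dtype, layout,
- // device, and pin_memory are forwarded to the batch rule unchanged.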
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor compressed_indices_value;
- optional<int64_t> compressed_indices_bdim;
- std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
- Tensor plain_indices_value;
- optional<int64_t> plain_indices_bdim;
- std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
- }
- Tensor compressed_indices_value;
- optional<int64_t> compressed_indices_bdim;
- std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
- Tensor plain_indices_value;
- optional<int64_t> plain_indices_bdim;
- std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor compressed_indices_value;
- optional<int64_t> compressed_indices_bdim;
- std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
- Tensor plain_indices_value;
- optional<int64_t> plain_indices_bdim;
- std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory);
- }
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory);
- }
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
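- // Validator ops return void, so their plumbing (tagged
- // "gen_vmap_plumbing_no_returns") invokes the batch rule purely for its side
- // effects and wraps no results.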
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size);
- }
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(indices_value, indices_bdim, values_value, values_bdim, size);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout);
- }
- Tensor compressed_indices_value;
- optional<int64_t> compressed_indices_bdim;
- std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
- Tensor plain_indices_value;
- optional<int64_t> plain_indices_bdim;
- std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
- }
- Tensor ccol_indices_value;
- optional<int64_t> ccol_indices_bdim;
- std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
- Tensor row_indices_value;
- optional<int64_t> row_indices_bdim;
- std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
- }
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
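- // A few in-place ops (sparse_resize_ and sparse_resize_and_clear_) take and
- // return `const at::Tensor &`; the plumbing still mutates through the batch
- // rule and hands back the original reference.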
- template <typename batch_rule_t, batch_rule_t batch_rule>
- const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::sparse_mask::call(self, mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
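- // TensorList arguments are passed to the batch rule whole, and vector-of-
- // Tensor results (here and in unbind further down) are re-wrapped per element
- // via makeBatchedVector instead of makeBatched.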
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _to_cpu_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::_to_cpu::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_dense::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_to_dense::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
- return at::_ops::to_dense_backward::call(grad, input);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor coalesce_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::coalesce::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_coalesce::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _indices_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_indices::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _values_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_values::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_coalesced_::call(self, coalesced);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, coalesced);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor indices_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::indices::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor values_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::values::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::crow_indices::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor col_indices_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::col_indices::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ccol_indices::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor row_indices_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::row_indices::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
- return at::_ops::hspmm::call(mat1, mat2);
- }
- Tensor mat1_value;
- optional<int64_t> mat1_bdim;
- std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
- Tensor mat2_value;
- optional<int64_t> mat2_bdim;
- std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
- auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
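- // Ops with several tensor inputs (hspmm above) take the unbatched fast path
- // only when none of them is batched at the current level; otherwise every
- // input is unwrapped at that level. An input that is not batched there
- // simply comes back with a nullopt bdim.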
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
- return self;
- }
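- // In-place variant: the batch rule's return value is discarded because it
- // writes through the unwrapped `self_value`, and the original `self` is
- // returned unchanged. Note the escape check names "gen_vmap_inplace_plumbing"
- // rather than "gen_vmap_plumbing" for these.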
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unbind_int::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
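- // Ops returning a list of tensors (unbind here) re-wrap with
- // makeBatchedVector instead of makeBatched: std::get<0>(results) is the
- // vector of values and std::get<1>(results) the batch dim applied to them.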
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unbind_Dimname::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, sparse_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_sparse_csr::call(self, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_sparse_csc::call(self, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_mkldnn::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
- return at::_ops::to_mkldnn_backward::call(grad, input);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, reduce_range);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
- return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
- }
- Tensor scales_value;
- optional<int64_t> scales_bdim;
- std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
- Tensor zero_points_value;
- optional<int64_t> zero_points_bdim;
- std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
- auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
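- // TensorList arguments (`tensors` above) are forwarded to the batch rule
- // as-is; only plain Tensor arguments get the unwrapTensorAtLevel treatment
- // here, so the batch rule presumably performs any per-element unwrapping
- // itself.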
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
- return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scales_value;
- optional<int64_t> scales_bdim;
- std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
- Tensor zero_points_value;
- optional<int64_t> zero_points_bdim;
- std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
- auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::dequantize_self::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> dequantize_tensors_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::dequantize_tensors::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::q_per_channel_scales::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::q_per_channel_zero_points::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor int_repr_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::int_repr::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
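- // For reference, a batch rule compatible with the plumbing above has the
- // mirrored signature: it takes each (value, bdim) pair plus any non-tensor
- // arguments and returns (tensor, bdim) tuples. A minimal sketch for
- // `int_repr` (illustrative only; not part of this generated file):
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   int_repr_batch_rule(const at::Tensor& self,
- //                       c10::optional<int64_t> self_bdim) {
- //     // int_repr is pointwise, so the batch dim passes through unchanged.
- //     return std::make_tuple(at::int_repr(self), self_bdim);
- //   }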
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale, zero_point);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
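- // Multi-output ops: `results` interleaves (value, bdim) pairs, so a
- // two-tensor return re-wraps std::get<0>/std::get<1> and
- // std::get<2>/std::get<3> separately and packs them with std::make_tuple.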
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) {
- return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- Tensor fake_quant_enabled_value;
- optional<int64_t> fake_quant_enabled_bdim;
- std::tie(fake_quant_enabled_value, fake_quant_enabled_bdim) = unwrapTensorAtLevel(fake_quant_enabled, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_saturate_weight_to_fp16::call(weight);
- }
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(weight_value, weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_device_generated_plumbing(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> meshgrid_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::meshgrid::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::meshgrid_indexing::call(tensors, indexing);
- }
- auto results = batch_rule(tensors, indexing);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::cartesian_prod::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::combinations::call(self, r, with_replacement);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, r, with_replacement);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward_generated_plumbing(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- }
- Tensor grad_y_value;
- optional<int64_t> grad_y_bdim;
- std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y, cur_level);
- Tensor z_state_value;
- optional<int64_t> z_state_bdim;
- std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
- Tensor cell_state_fwd_value;
- optional<int64_t> cell_state_fwd_bdim;
- std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor layersOutputs_value;
- optional<int64_t> layersOutputs_bdim;
- std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level);
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
- }
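- // Optional tensor arguments (grad_hy, grad_cy above) are unwrapped only when
- // present; otherwise their nullopt value/bdim pair is passed to the batch
- // rule as-is.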
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
- return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
- }
- Tensor input_gates_value;
- optional<int64_t> input_gates_bdim;
- std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
- Tensor hidden_gates_value;
- optional<int64_t> hidden_gates_bdim;
- std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
- Tensor cx_value;
- optional<int64_t> cx_bdim;
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
- optional<Tensor> input_bias_value;
- optional<int64_t> input_bias_bdim;
- if (input_bias) {
- std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
- }
- optional<Tensor> hidden_bias_value;
- optional<int64_t> hidden_bias_bdim;
- if (hidden_bias) {
- std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
- }
- auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
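- // NOTE (editorial annotation): c10::optional<at::Tensor> arguments (input_bias and
- // hidden_bias above) are unwrapped only when they hold a value; otherwise the
- // corresponding *_value/*_bdim locals stay nullopt and the batch rule is expected to
- // treat the argument as absent.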
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
- return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
- }
- Tensor cx_value;
- optional<int64_t> cx_bdim;
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
- Tensor cy_value;
- optional<int64_t> cy_bdim;
- std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
- Tensor workspace_value;
- optional<int64_t> workspace_bdim;
- std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
- return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
- }
- Tensor cx_value;
- optional<int64_t> cx_bdim;
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
- Tensor cy_value;
- optional<int64_t> cy_bdim;
- std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
- Tensor workspace_value;
- optional<int64_t> workspace_bdim;
- std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
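- // NOTE (editorial annotation): batch rules return a flat tuple of interleaved
- // (Tensor, optional bdim) pairs, so an op with five Tensor outputs (as above) yields a
- // ten-element result tuple that is re-wrapped pairwise via std::get<0>/<1> through
- // std::get<8>/<9>.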
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level)) {
- return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
- }
- Tensor input_gates_value;
- optional<int64_t> input_gates_bdim;
- std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
- Tensor hidden_gates_value;
- optional<int64_t> hidden_gates_bdim;
- std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
- Tensor cx_value;
- optional<int64_t> cx_bdim;
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
- Tensor cy_value;
- optional<int64_t> cy_bdim;
- std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- optional<Tensor> input_bias_value;
- optional<int64_t> input_bias_bdim;
- if (input_bias) {
- std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
- }
- optional<Tensor> hidden_bias_value;
- optional<int64_t> hidden_bias_bdim;
- if (hidden_bias) {
- std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
- }
- auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
- return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
- }
- Tensor input_gates_value;
- optional<int64_t> input_gates_bdim;
- std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
- Tensor hidden_gates_value;
- optional<int64_t> hidden_gates_bdim;
- std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- optional<Tensor> input_bias_value;
- optional<int64_t> input_bias_bdim;
- if (input_bias) {
- std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
- }
- optional<Tensor> hidden_bias_value;
- optional<int64_t> hidden_bias_bdim;
- if (hidden_bias) {
- std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
- }
- auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
- return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
- }
- Tensor grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
- Tensor workspace_value;
- optional<int64_t> workspace_bdim;
- std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
- auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
- return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
- }
- Tensor grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
- Tensor input_gates_value;
- optional<int64_t> input_gates_bdim;
- std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
- Tensor hidden_gates_value;
- optional<int64_t> hidden_gates_bdim;
- std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- optional<Tensor> input_bias_value;
- optional<int64_t> input_bias_bdim;
- if (input_bias) {
- std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
- }
- optional<Tensor> hidden_bias_value;
- optional<int64_t> hidden_bias_bdim;
- if (hidden_bias) {
- std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
- }
- auto results = batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
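- // NOTE (editorial annotation): in the full-sequence RNN ops below, at::TensorList
- // arguments (hx, params) are forwarded to the batch rule as-is rather than unwrapped
- // element-wise; the isBatchedAtLevel check on the list still guards the unbatched
- // fast path.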
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- }
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- Tensor batch_sizes_value;
- optional<int64_t> batch_sizes_bdim;
- std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
- auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> gru_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- }
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- Tensor batch_sizes_value;
- optional<int64_t> batch_sizes_bdim;
- std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- }
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- Tensor batch_sizes_value;
- optional<int64_t> batch_sizes_bdim;
- std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
- return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- }
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- Tensor batch_sizes_value;
- optional<int64_t> batch_sizes_bdim;
- std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
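- // NOTE (editorial annotation): each RNN op comes as an *_input/*_data overload pair:
- // the *_input form takes a padded input tensor (with a batch_first flag), while the
- // *_data form takes PackedSequence-style data plus batch_sizes and therefore has no
- // batch_first flag. The plumbing pattern is otherwise identical.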
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
- return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- optional<Tensor> b_ih_value;
- optional<int64_t> b_ih_bdim;
- if (b_ih) {
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
- }
- optional<Tensor> b_hh_value;
- optional<int64_t> b_hh_bdim;
- if (b_hh) {
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
- return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- optional<Tensor> b_ih_value;
- optional<int64_t> b_ih_bdim;
- if (b_ih) {
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
- }
- optional<Tensor> b_hh_value;
- optional<int64_t> b_hh_bdim;
- if (b_hh) {
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
- return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- optional<Tensor> b_ih_value;
- optional<int64_t> b_ih_bdim;
- if (b_ih) {
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
- }
- optional<Tensor> b_hh_value;
- optional<int64_t> b_hh_bdim;
- if (b_hh) {
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
- return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- optional<Tensor> b_ih_value;
- optional<int64_t> b_ih_bdim;
- if (b_ih) {
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
- }
- optional<Tensor> b_hh_value;
- optional<int64_t> b_hh_bdim;
- if (b_hh) {
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
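- // NOTE (editorial annotation): single-output cell ops return makeBatched(...) directly
- // instead of a std::make_tuple of wrapped results. A batch rule plugged into these
- // templates has, schematically, the following shape — an illustrative sketch inferred
- // from the call site above, not the signature of any concrete rule shipped with
- // functorch:
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   rnn_relu_cell_batch_rule(
- //       const at::Tensor& input, c10::optional<int64_t> input_bdim,
- //       const at::Tensor& hx, c10::optional<int64_t> hx_bdim,
- //       const at::Tensor& w_ih, c10::optional<int64_t> w_ih_bdim,
- //       const at::Tensor& w_hh, c10::optional<int64_t> w_hh_bdim,
- //       const c10::optional<at::Tensor>& b_ih, c10::optional<int64_t> b_ih_bdim,
- //       const c10::optional<at::Tensor>& b_hh, c10::optional<int64_t> b_hh_bdim);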
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
- return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- Tensor b_ih_value;
- optional<int64_t> b_ih_bdim;
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
- Tensor b_hh_value;
- optional<int64_t> b_hh_bdim;
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
- Tensor packed_ih_value;
- optional<int64_t> packed_ih_bdim;
- std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
- Tensor packed_hh_value;
- optional<int64_t> packed_hh_bdim;
- std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
- Tensor col_offsets_ih_value;
- optional<int64_t> col_offsets_ih_bdim;
- std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
- Tensor col_offsets_hh_value;
- optional<int64_t> col_offsets_hh_bdim;
- std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
- return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- Tensor b_ih_value;
- optional<int64_t> b_ih_bdim;
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
- Tensor b_hh_value;
- optional<int64_t> b_hh_bdim;
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
- Tensor packed_ih_value;
- optional<int64_t> packed_ih_bdim;
- std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
- Tensor packed_hh_value;
- optional<int64_t> packed_hh_bdim;
- std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
- Tensor col_offsets_ih_value;
- optional<int64_t> col_offsets_ih_bdim;
- std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
- Tensor col_offsets_hh_value;
- optional<int64_t> col_offsets_hh_bdim;
- std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
- return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- Tensor b_ih_value;
- optional<int64_t> b_ih_bdim;
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
- Tensor b_hh_value;
- optional<int64_t> b_hh_bdim;
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
- Tensor packed_ih_value;
- optional<int64_t> packed_ih_bdim;
- std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
- Tensor packed_hh_value;
- optional<int64_t> packed_hh_bdim;
- std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
- Tensor col_offsets_ih_value;
- optional<int64_t> col_offsets_ih_bdim;
- std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
- Tensor col_offsets_hh_value;
- optional<int64_t> col_offsets_hh_bdim;
- std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
- return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor w_ih_value;
- optional<int64_t> w_ih_bdim;
- std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
- Tensor w_hh_value;
- optional<int64_t> w_hh_bdim;
- std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
- Tensor b_ih_value;
- optional<int64_t> b_ih_bdim;
- std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
- Tensor b_hh_value;
- optional<int64_t> b_hh_bdim;
- std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
- Tensor packed_ih_value;
- optional<int64_t> packed_ih_bdim;
- std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
- Tensor packed_hh_value;
- optional<int64_t> packed_hh_bdim;
- std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
- Tensor col_offsets_ih_value;
- optional<int64_t> col_offsets_ih_bdim;
- std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
- Tensor col_offsets_hh_value;
- optional<int64_t> col_offsets_hh_bdim;
- std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
- auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
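- // NOTE (editorial annotation): in the quantized cell variants above, the at::Scalar
- // parameters (scale_ih, scale_hh, zero_point_ih, zero_point_hh) pass through untouched;
- // only Tensor arguments carry batch dimensions and need unwrapping.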
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
- return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor lengths_value;
- optional<int64_t> lengths_bdim;
- std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths, cur_level);
- auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
- return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor batch_sizes_value;
- optional<int64_t> batch_sizes_bdim;
- std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
- return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
- }
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- Tensor batch_sizes_value;
- optional<int64_t> batch_sizes_bdim;
- std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
- auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
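- // NOTE (editorial annotation): the set_ overloads below are in-place ops generated with
- // the "gen_vmap_inplace_plumbing" marker: the batch rule is invoked purely for its side
- // effect on the unwrapped self_value, and the plumbing returns the original self so the
- // usual in-place aliasing contract is preserved.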
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & set__source_Storage_generated_plumbing(at::Tensor & self, at::Storage source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::set__source_Storage::call(self, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, source);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & set__source_Storage_storage_offset_generated_plumbing(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & set__source_Tensor_storage_offset_generated_plumbing(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, size, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, source_value, source_bdim, storage_offset, size, stride);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & set__source_Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::set__source_Tensor::call(self, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, source_value, source_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & set__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::set_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
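- // NOTE (editorial annotation, hedged): lift, lift_fresh, and lift_fresh_copy below are
- // identity-like ops used by functionalization; for vmap purposes their plumbing is just
- // the standard unary pattern — unwrap self, run the batch rule, re-wrap the single
- // (Tensor, bdim) result.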
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lift_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lift::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lift_fresh::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lift_fresh_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
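- // NOTE (editorial annotation): masked_fill comes in in-place/out-of-place pairs
- // (masked_fill__* mutates and returns self; masked_fill_* re-wraps a fresh result),
- // instantiating the two plumbing patterns documented above for both Scalar and Tensor
- // fill values.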
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::masked_fill__Scalar::call(self, mask, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::masked_fill_Scalar::call(self, mask, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
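- // The call above implies a batch rule of roughly this shape (a sketch,
- // assuming the tuple-returning convention these templates consume via
- // std::get<0>/<1>; the name is illustrative):
- //   std::tuple<at::Tensor, c10::optional<int64_t>> masked_fill_scalar_batch_rule(
- //       const at::Tensor& self, c10::optional<int64_t> self_bdim,
- //       const at::Tensor& mask, c10::optional<int64_t> mask_bdim,
- //       const at::Scalar& value);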
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::masked_fill__Tensor::call(self, mask, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::masked_fill_Tensor::call(self, mask, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::masked_scatter_::call(self, mask, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::masked_scatter::call(self, mask, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
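- // Non-tensor arguments (e.g. the optional `dim` and `mask_type` below)
- // are forwarded to the batch rule unchanged; only tensor arguments carry
- // batch dimensions and need unwrapping.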
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_dtype::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::put_::call(self, index, source, accumulate);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::put::call(self, index, source, accumulate);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_add_::call(self, dim, index, source, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_add::call(self, dim, index, source, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_fill__int_Scalar::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::index_fill__int_Tensor::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
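- // Each schema overload of scatter (src / value / reduce / value_reduce /
- // dimname / add / reduce_two) gets its own plumbing instantiation named
- // after the overload; most come in functional/in-place pairs, while the
- // dimname variants here are functional only.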
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_src::call(self, dim, index, src);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter__src::call(self, dim, index, src);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::scatter_value::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::scatter__value::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter__reduce::call(self, dim, index, src, reduce);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_dimname_src::call(self, dim, index, src);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::scatter_dimname_value::call(self, dim, index, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_add::call(self, dim, index, src);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_add_::call(self, dim, index, src);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_add_dimname::call(self, dim, index, src);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
- return self;
- }
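- // In-place comparison and bitwise ops follow the same in-place pattern:
- // the Scalar overloads unwrap only `self`, the Tensor overloads unwrap
- // both operands before invoking the batch rule.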
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::eq__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::eq__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_and_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
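- // Scalar_Tensor overloads take a Scalar `self`, so only `other` is
- // checked and unwrapped; the scalar is passed to the batch rule as-is.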
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_and_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_and__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_and__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__and___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__and___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__iand___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__iand___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_or_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_or_Scalar_Tensor::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_or_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_or__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_or__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__or___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__or___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__ior___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__ior___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_xor_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
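/*
 * In the Scalar_Tensor overloads (such as bitwise_xor.Scalar_Tensor
 * above), only tensor arguments take part in the vmap bookkeeping: the
 * at::Scalar self is forwarded to the batch rule unchanged, the
 * isBatchedAtLevel check covers other alone, and the fallthrough
 * dispatches to the unbatched op when other is not batched at the
 * current level.
 */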
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_xor_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_xor__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_xor__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__xor___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__xor___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__ixor___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__ixor___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__lshift___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__lshift___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__ilshift___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__ilshift___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_left_shift_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_left_shift__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __rshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__rshift___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__rshift___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::__irshift___Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::__irshift___Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_right_shift_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_right_shift__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tril_::call(self, diagonal);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, diagonal);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::triu_::call(self, diagonal);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, diagonal);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & digamma__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::digamma_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
- return at::_ops::lerp__Scalar::call(self, end, weight);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor end_value;
- optional<int64_t> end_bdim;
- std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
- batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::lerp__Tensor::call(self, end, weight);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor end_value;
- optional<int64_t> end_bdim;
- std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
- return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor batch1_value;
- optional<int64_t> batch1_bdim;
- std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
- Tensor batch2_value;
- optional<int64_t> batch2_bdim;
- std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
- batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
- return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor batch1_value;
- optional<int64_t> batch1_bdim;
- std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
- Tensor batch2_value;
- optional<int64_t> batch2_bdim;
- std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
- auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
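/*
 * Ops with several tensor arguments, such as addbmm above, scale the same
 * pattern: the fallthrough condition is the conjunction of
 * isBatchedAtLevel over every tensor argument, each tensor gets its own
 * unwrapTensorAtLevel call, and non-tensor arguments (beta, alpha) pass
 * through untouched. A plausible signature for the corresponding batch
 * rule, again only a sketch inferred from the call site:
 *
 *   std::tuple<at::Tensor, c10::optional<int64_t>> addbmm_batch_rule(
 *       const at::Tensor & self, c10::optional<int64_t> self_bdim,
 *       const at::Tensor & batch1, c10::optional<int64_t> batch1_bdim,
 *       const at::Tensor & batch2, c10::optional<int64_t> batch2_bdim,
 *       const at::Scalar & beta, const at::Scalar & alpha);
 */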
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::random__from::call(self, from, to, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, from, to, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::random__to::call(self, to, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, to, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & random__generated_plumbing(at::Tensor & self, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::random_::call(self, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::uniform_::call(self, from, to, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, from, to, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cauchy_::call(self, median, sigma, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, median, sigma, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_normal_::call(self, mean, std, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, mean, std, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::exponential_::call(self, lambd, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, lambd, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::geometric_::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, generator);
- return self;
- }
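/*
 * The random-sampling mutators above (random_, uniform_, cauchy_,
 * log_normal_, exponential_, geometric_) all reuse the in-place plumbing
 * shape: a single self tensor is unwrapped, and the remaining scalar
 * parameters together with the c10::optional<at::Generator> are forwarded
 * to the batch rule verbatim.
 */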
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::diag::call(self, diagonal);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, diagonal);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::cross::call(self, other, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::triu::call(self, diagonal);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, diagonal);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::tril::call(self, diagonal);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, diagonal);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trace_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::trace::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level)) {
- return at::_ops::trace_backward::call(grad, sizes);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, sizes);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ne_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::ne_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ne__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::ne__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::not_equal_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::not_equal_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::not_equal__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::not_equal__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::eq_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::eq_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ge_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::ge_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ge__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::ge__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::greater_equal_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::greater_equal_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::greater_equal__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::greater_equal__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::le_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::le_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::le__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::le__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::less_equal_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::less_equal_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::less_equal__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::less_equal__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gt_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::gt_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::gt__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::gt__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::greater_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::greater_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::greater__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::greater__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lt_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::lt_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lt__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::lt__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::less_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::less_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::less__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::less__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
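- // The indexing/selection ops that follow (take, index_select, gather, ...)
- // reuse the same binary plumbing; index and mask tensors are unwrapped
- // exactly like any other batched argument.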
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::take::call(self, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, index_value, index_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::take_along_dim::call(self, indices, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_select::call(self, dim, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_select_dimname::call(self, dim, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::masked_select::call(self, mask);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::masked_select_backward::call(grad, input, mask);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor mask_value;
- optional<int64_t> mask_bdim;
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nonzero_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nonzero::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
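- // Ops returning a vector of tensors (nonzero_numpy below) use
- // makeBatchedVector to re-wrap the returned vector of tensors at the
- // current level, rather than a single makeBatched call.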
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> nonzero_numpy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nonzero_numpy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor argwhere_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::argwhere::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::gather::call(self, dim, index, sparse_grad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
- return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
- return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor index_value;
- optional<int64_t> index_bdim;
- std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::addcmul::call(self, tensor1, tensor2, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor tensor1_value;
- optional<int64_t> tensor1_bdim;
- std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
- Tensor tensor2_value;
- optional<int64_t> tensor2_bdim;
- std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
- auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::addcmul_::call(self, tensor1, tensor2, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor tensor1_value;
- optional<int64_t> tensor1_bdim;
- std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
- Tensor tensor2_value;
- optional<int64_t> tensor2_bdim;
- std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
- batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor tensor1_value;
- optional<int64_t> tensor1_bdim;
- std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
- Tensor tensor2_value;
- optional<int64_t> tensor2_bdim;
- std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
- auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::addcdiv_::call(self, tensor1, tensor2, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor tensor1_value;
- optional<int64_t> tensor1_bdim;
- std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
- Tensor tensor2_value;
- optional<int64_t> tensor2_bdim;
- std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
- batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
- return self;
- }
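- // Optional tensor arguments (such as `weight` in cross_entropy_loss below)
- // are unwrapped only when they hold a value; an absent optional reaches the
- // batch rule as an empty (value, bdim) pair.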
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
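- // Multi-output ops flatten their results into one tuple of alternating
- // (value, bdim) entries; each pair is re-wrapped separately and the outputs
- // reassembled with std::make_tuple (see triangular_solve and svd below).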
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
- return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
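- // Ops with no returns (_linalg_check_errors below) still unwrap their
- // arguments and invoke the batch rule for its checking side effect, but
- // have nothing to re-wrap.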
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(info, cur_level)) {
- return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
- }
- Tensor info_value;
- optional<int64_t> info_bdim;
- std::tie(info_value, info_bdim) = unwrapTensorAtLevel(info, cur_level);
- batch_rule(info_value, info_bdim, api_name, is_matrix);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) {
- return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor B_value;
- optional<int64_t> B_bdim;
- std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
- auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::linalg_vander::call(x, N);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, N);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::svd::call(self, some, compute_uv);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, some, compute_uv);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::swapaxes::call(self, axis0, axis1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, axis0, axis1);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::swapdims::call(self, dim0, dim1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim0, dim1);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cholesky::call(self, upper);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, upper);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
- return at::_ops::cholesky_solve::call(self, input2, upper);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor input2_value;
- optional<int64_t> input2_bdim;
- std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
- auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
- return at::_ops::_cholesky_solve_helper::call(self, A, upper);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cholesky_inverse::call(self, upper);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, upper);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> qr_generated_plumbing(const at::Tensor & self, bool some) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::qr::call(self, some);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, some);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> geqrf_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::geqrf::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
- return at::_ops::orgqr::call(self, input2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor input2_value;
- optional<int64_t> input2_bdim;
- std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
- auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) {
- return at::_ops::ormqr::call(self, input2, input3, left, transpose);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor input2_value;
- optional<int64_t> input2_bdim;
- std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
- Tensor input3_value;
- optional<int64_t> input3_bdim;
- std::tie(input3_value, input3_bdim) = unwrapTensorAtLevel(input3, cur_level);
- auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_lu_with_info::call(self, pivot, check_errors);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, pivot, check_errors);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
- return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor LU_data_value;
- optional<int64_t> LU_data_bdim;
- std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
- Tensor LU_pivots_value;
- optional<int64_t> LU_pivots_bdim;
- std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
- auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
- return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
- }
- Tensor LU_data_value;
- optional<int64_t> LU_data_bdim;
- std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
- Tensor LU_pivots_value;
- optional<int64_t> LU_pivots_bdim;
- std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
- auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
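- // Non-tensor arguments (dims, flags, scalars, and c10::optional<Generator>
- // as in multinomial below) are forwarded to the batch rule unchanged.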
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multinomial_generated_plumbing(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::multinomial::call(self, num_samples, replacement, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & lgamma__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lgamma_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lgamma_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::lgamma::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor digamma_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::digamma::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::polygamma::call(n, self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(n, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::polygamma_::call(self, n);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, n);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::erfinv::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & erfinv__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::erfinv_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor i0_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::i0::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & i0__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::i0_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sign_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sign::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & sign__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sign_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor signbit_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::signbit::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::dist::call(self, other, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::atan2_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::atan2::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::arctan2::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::arctan2_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
- return at::_ops::lerp_Scalar::call(self, end, weight);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor end_value;
- optional<int64_t> end_bdim;
- std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
- auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::lerp_Tensor::call(self, end, weight);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor end_value;
- optional<int64_t> end_bdim;
- std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::histc::call(self, bins, min, max);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, bins, min, max);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor bins_value;
- optional<int64_t> bins_bdim;
- std::tie(bins_value, bins_bdim) = unwrapTensorAtLevel(bins, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
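- // Optional tensor arguments such as `weight` are unwrapped only when they
- // hold a value; otherwise weight_value/weight_bdim stay nullopt and are
- // forwarded as-is, letting the batch rule distinguish an absent tensor from
- // one that is present but unbatched.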
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
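- // Ops returning ::std::vector<at::Tensor> (e.g. _histogramdd_bin_edges)
- // re-wrap through makeBatchedVector, which applies the same optional batch
- // dimension to every tensor in the returned vector at the current level.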
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::histogramdd::call(self, bins, range, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fmod_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fmod__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::fmod_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::fmod__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::hypot::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::hypot_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::igamma::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::igamma_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::igammac::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::igammac_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::nextafter::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::nextafter_::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::remainder_Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::remainder__Scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, other);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::remainder_Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::remainder__Tensor::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- batch_rule(self_value, self_bdim, other_value, other_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::remainder_Scalar_Tensor::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
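- // Scalar-first overloads such as remainder.Scalar_Tensor have nothing to
- // unwrap on the scalar side: at::Scalar arguments are forwarded to the
- // batch rule unchanged, and only the tensor operand carries a
- // (value, bdim) pair.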
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor min_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::min::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::fmin::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::fmax::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::maximum::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::max_other::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::minimum::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::min_other::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
- return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor q_value;
- optional<int64_t> q_bdim;
- std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
- auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
- return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor q_value;
- optional<int64_t> q_bdim;
- std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
- auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sort::call(self, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, descending);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> sort_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sort_stable::call(self, stable, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sort_dimname::call(self, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, descending);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor msort_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::msort::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::argsort::call(self, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, descending);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::argsort_stable::call(self, stable, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::argsort_dimname::call(self, dim, descending);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, descending);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> topk_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::topk::call(self, k, dim, largest, sorted);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
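// Multi-return plumbing (as for topk above) differs only in the re-wrap step:
// the batch rule returns an interleaved tuple (value0, bdim0, value1, bdim1),
// and each output is re-wrapped at the current level independently.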
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor all_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::all::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor any_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::any::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::renorm::call(self, p, dim, maxnorm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::renorm_::call(self, p, dim, maxnorm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, p, dim, maxnorm);
- return self;
- }
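// In-place plumbing (note the "gen_vmap_inplace_plumbing" escape tag) calls
// the batch rule purely for its side effect on the unwrapped self_value, then
// returns the original wrapper `self`, preserving in-place aliasing semantics.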
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unfold::call(self, dimension, size, step);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dimension, size, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_in, cur_level)) {
- return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
- }
- Tensor grad_in_value;
- optional<int64_t> grad_in_bdim;
- std::tie(grad_in_value, grad_in_bdim) = unwrapTensorAtLevel(grad_in, cur_level);
- auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
- return at::_ops::pow_Tensor_Tensor::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor exponent_value;
- optional<int64_t> exponent_bdim;
- std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
- auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(exponent, cur_level)) {
- return at::_ops::pow_Scalar::call(self, exponent);
- }
- Tensor exponent_value;
- optional<int64_t> exponent_bdim;
- std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
- auto results = batch_rule(self, exponent_value, exponent_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
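// Overloads whose first argument is a Scalar (pow_Scalar above) guard and
// unwrap only the tensor arguments; a Scalar carries no batch dim and is
// forwarded as-is.
//
// A hypothetical batch-rule sketch matching that shape (illustrative only,
// not functorch's actual rule). Since pow is elementwise, moving the
// exponent's batch dim to the front and deferring to at::pow suffices:
static std::tuple<at::Tensor, c10::optional<int64_t>>
pow_scalar_batch_rule_sketch(const at::Scalar& self,
                             const at::Tensor& exponent,
                             c10::optional<int64_t> exponent_bdim) {
  // The guard above only reaches the rule when `exponent` is batched,
  // so exponent_bdim holds a value here.
  auto exponent_ = exponent.movedim(*exponent_bdim, 0);
  return std::make_tuple(at::pow(self, exponent_), 0);
}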
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pow_Tensor_Scalar::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, exponent);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pow__Scalar::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, exponent);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
- return at::_ops::pow__Tensor::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor exponent_value;
- optional<int64_t> exponent_bdim;
- std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
- batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
- return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor exponent_value;
- optional<int64_t> exponent_bdim;
- std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
- auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(exponent, cur_level)) {
- return at::_ops::float_power_Scalar::call(self, exponent);
- }
- Tensor exponent_value;
- optional<int64_t> exponent_bdim;
- std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
- auto results = batch_rule(self, exponent_value, exponent_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, exponent);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::float_power__Scalar::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, exponent);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
- return at::_ops::float_power__Tensor::call(self, exponent);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor exponent_value;
- optional<int64_t> exponent_bdim;
- std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
- batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::normal_::call(self, mean, std, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, mean, std, generator);
- return self;
- }
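// Random ops (the normal_* family here) forward the optional Generator to the
// batch rule untouched; any vmap-specific randomness policy is the batch
// rule's responsibility, not the plumbing's.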
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::normal_functional::call(self, mean, std, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, mean, std, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(mean, cur_level)) {
- return at::_ops::normal_Tensor_float::call(mean, std, generator);
- }
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- auto results = batch_rule(mean_value, mean_bdim, std, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(std, cur_level)) {
- return at::_ops::normal_float_Tensor::call(mean, std, generator);
- }
- Tensor std_value;
- optional<int64_t> std_bdim;
- std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
- auto results = batch_rule(mean, std_value, std_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) {
- return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
- }
- Tensor mean_value;
- optional<int64_t> mean_bdim;
- std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
- Tensor std_value;
- optional<int64_t> std_bdim;
- std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
- auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor alias_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::alias::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
- return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
- }
- Tensor found_inf_value;
- optional<int64_t> found_inf_bdim;
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
- Tensor inv_scale_value;
- optional<int64_t> inv_scale_bdim;
- std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
- batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
- }
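// Void plumbing ("gen_vmap_plumbing_no_returns") has nothing to re-wrap: it
// invokes the batch rule for its side effects and returns. Note also that
// TensorList arguments (`self` here) are passed through to the batch rule
// still wrapped; only plain Tensor arguments are unwrapped by the plumbing.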
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_add_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
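// Foreach ops that return a list re-wrap with makeBatchedVector, which applies
// a single shared output bdim across every tensor in the returned vector,
// rather than re-wrapping each output individually.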
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_add__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sub_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sub__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_mul_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_mul__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_div_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_div__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
- }
- auto results = batch_rule(self, scalar);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
- }
- batch_rule(self, scalar);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_add_List::call(self, other, alpha);
- }
- auto results = batch_rule(self, other, alpha);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
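// List-with-list variants guard on both operands: the fast path is taken only
// when neither `self` nor `other` is batched at the current level.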
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_add__List::call(self, other, alpha);
- }
- batch_rule(self, other, alpha);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_sub_List::call(self, other, alpha);
- }
- auto results = batch_rule(self, other, alpha);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_sub__List::call(self, other, alpha);
- }
- batch_rule(self, other, alpha);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_mul_List::call(self, other);
- }
- auto results = batch_rule(self, other);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_mul__List::call(self, other);
- }
- batch_rule(self, other);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_div_List::call(self, other);
- }
- auto results = batch_rule(self, other);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_div__List::call(self, other);
- }
- batch_rule(self, other);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_clamp_min_List::call(self, other);
- }
- auto results = batch_rule(self, other);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_clamp_min__List::call(self, other);
- }
- batch_rule(self, other);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_clamp_max_List::call(self, other);
- }
- auto results = batch_rule(self, other);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_clamp_max__List::call(self, other);
- }
- batch_rule(self, other);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_maximum_List::call(self, other);
- }
- auto results = batch_rule(self, other);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_maximum__List::call(self, other);
- }
- batch_rule(self, other);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_minimum_List::call(self, other);
- }
- auto results = batch_rule(self, other);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_foreach_minimum__List::call(self, other);
- }
- batch_rule(self, other);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_add_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
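// ScalarList variants take at::ArrayRef<at::Scalar>, which can never be
// batched, so only the TensorList operand participates in the guard.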
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_add__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_div_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_div__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
- }
- auto results = batch_rule(self, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
- }
- batch_rule(self, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_exp_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_exp::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
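// The remainder of this hunk repeats the same unary foreach pattern
// (out-of-place returning a vector, in-place returning void) once per
// pointwise op: zero_, exp, sqrt, abs, acos, asin, atan, ceil, cos, cosh,
// erf, and erfc below.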
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_zero__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_zero_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_exp__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_exp_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sqrt_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sqrt::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sqrt__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sqrt_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_abs_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_abs::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_abs__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_abs_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_acos_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_acos::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_acos__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_acos_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_asin_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_asin::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_asin__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_asin_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_atan_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_atan::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_atan__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_atan_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_ceil_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_ceil::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_ceil__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_ceil_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_cos_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_cos::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_cos__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_cos_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_cosh_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_cosh::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_cosh__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_cosh_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_erf_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_erf::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_erf__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_erf_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_erfc_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_erfc::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_erfc__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_erfc_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_expm1_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_expm1::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_expm1__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_expm1_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_floor_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_floor::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_floor__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_floor_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_log_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_log10_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log10::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log10__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log10_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_log1p_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log1p::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log1p__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log1p_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_log2_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log2::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log2__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_log2_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_neg_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_neg::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_neg__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_neg_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_tan_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_tan::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_tan__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_tan_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_tanh_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_tanh::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_tanh__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_tanh_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sin_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sin::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sin__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sin_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sinh_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sinh::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sinh__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sinh_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_round_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_round::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_round__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_round_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_lgamma_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_lgamma::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_lgamma__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_lgamma_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_frac_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_frac::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_frac__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_frac_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_reciprocal_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_reciprocal::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_reciprocal__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_reciprocal_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_sigmoid_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sigmoid::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sigmoid__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_sigmoid_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_trunc_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_trunc::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_trunc__generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_trunc_::call(self);
- }
- batch_rule(self);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
- }
- batch_rule(self, tensor1, tensor2, value);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
- }
- batch_rule(self, tensor1, tensor2, value);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
- }
- batch_rule(self, tensor1, tensor2, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
- return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
- }
- Tensor scalars_value;
- optional<int64_t> scalars_bdim;
- std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
- batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
- }
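- // Unlike the Scalar and ScalarList overloads above, which pass their scalar
- // arguments through to the batch rule untouched, the Tensor overload's
- // scalars argument can itself be batched: it is split by unwrapTensorAtLevel
- // into a plain value plus an optional batch dimension, and both halves are
- // handed to the batch rule in place of the original tensor.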
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
- }
- batch_rule(self, tensor1, tensor2, scalars);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
- return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
- }
- Tensor scalars_value;
- optional<int64_t> scalars_bdim;
- std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
- batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
- }
- auto results = batch_rule(self, tensor1, tensor2, value);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
- }
- auto results = batch_rule(self, tensor1, tensor2, value);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
- }
- auto results = batch_rule(self, tensor1, tensor2, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
- return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
- }
- Tensor scalars_value;
- optional<int64_t> scalars_bdim;
- std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
- auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
- return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
- }
- auto results = batch_rule(self, tensor1, tensor2, scalars);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
- return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
- }
- Tensor scalars_value;
- optional<int64_t> scalars_bdim;
- std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
- auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
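- // The out-of-place addcdiv/addcmul wrappers above mirror the in-place ones
- // exactly in how they test for and unwrap batched arguments; the only
- // difference is that they capture the batch rule's (results, bdim) pair and
- // rewrap it with makeBatchedVector instead of returning void.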
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & ord) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_norm_Scalar::call(self, ord);
- }
- auto results = batch_rule(self, ord);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
- return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
- }
- auto results = batch_rule(self, tensors1, weights);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
- return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
- }
- batch_rule(self, tensors1, weights);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
- return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
- }
- auto results = batch_rule(self, tensors1, weight);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
- return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
- }
- batch_rule(self, tensors1, weight);
- }
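- // From here on the wrappers handle individual Tensors rather than
- // TensorLists: each tensor argument is split by unwrapTensorAtLevel into a
- // (value, bdim) pair, the batch rule receives both halves, and the single
- // result is rewrapped with makeBatched. A sketch of the shape such a rule
- // takes for a binary op like bucketize_Tensor (inferred from the call site
- // below, not an actual functorch declaration):
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   bucketize_batch_rule(
- //       const at::Tensor& self, c10::optional<int64_t> self_bdim,
- //       const at::Tensor& boundaries, c10::optional<int64_t> boundaries_bdim,
- //       bool out_int32, bool right);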
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) {
- return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor boundaries_value;
- optional<int64_t> boundaries_bdim;
- std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
- auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(boundaries, cur_level)) {
- return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
- }
- Tensor boundaries_value;
- optional<int64_t> boundaries_bdim;
- std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
- auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
- return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
- }
- Tensor sorted_sequence_value;
- optional<int64_t> sorted_sequence_bdim;
- std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> sorter_value;
- optional<int64_t> sorter_bdim;
- if (sorter) {
- std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
- }
- auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
- return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
- }
- Tensor sorted_sequence_value;
- optional<int64_t> sorted_sequence_bdim;
- std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
- optional<Tensor> sorter_value;
- optional<int64_t> sorter_bdim;
- if (sorter) {
- std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
- }
- auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
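- // Optional tensor arguments such as sorter get the same treatment with one
- // twist: the wrapper only calls unwrapTensorAtLevel when the optional is
- // engaged, so an absent sorter reaches the batch rule as a disengaged
- // optional<Tensor> with a nullopt batch dimension. The optional weight
- // arguments in the loss wrappers further down follow the same pattern.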
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, out_int32);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) {
- return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
- }
- Tensor crow_indices_value;
- optional<int64_t> crow_indices_bdim;
- std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
- Tensor col_indices_value;
- optional<int64_t> col_indices_bdim;
- std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
- auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::mse_loss::call(self, target, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::l1_loss::call(self, target, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::multilabel_margin_loss::call(self, target, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
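- // Multi-output ops interleave their results: the batch rule returns
- // (value0, bdim0, value1, bdim1), and the wrapper rewraps each pair with
- // makeBatched before reassembling the tuple, as multilabel_margin_loss_forward
- // does above with std::get<0>/<1> and std::get<2>/<3>.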
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(is_target, cur_level)) {
- return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- Tensor is_target_value;
- optional<int64_t> is_target_bdim;
- std::tie(is_target_value, is_target_bdim) = unwrapTensorAtLevel(is_target, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
- return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- Tensor total_weight_value;
- optional<int64_t> total_weight_bdim;
- std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
- return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- Tensor total_weight_value;
- optional<int64_t> total_weight_bdim;
- std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::smooth_l1_loss::call(self, target, reduction, beta);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::huber_loss::call(self, target, reduction, delta);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::soft_margin_loss::call(self, target, reduction);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
- return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor target_value;
- optional<int64_t> target_bdim;
- std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::elu::call(self, alpha, scale, input_scale);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) {
- return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_or_result_value;
- optional<int64_t> self_or_result_bdim;
- std::tie(self_or_result_value, self_or_result_bdim) = unwrapTensorAtLevel(self_or_result, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::elu_::call(self, alpha, scale, input_scale);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, alpha, scale, input_scale);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor glu_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::glu::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::glu_backward::call(grad_output, self, dim);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
- return at::_ops::glu_jvp::call(glu, x, dx, dim);
- }
- Tensor glu_value;
- optional<int64_t> glu_bdim;
- std::tie(glu_value, glu_bdim) = unwrapTensorAtLevel(glu, cur_level);
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor dx_value;
- optional<int64_t> dx_bdim;
- std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
- auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
- return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
- }
- Tensor grad_x_value;
- optional<int64_t> grad_x_bdim;
- std::tie(grad_x_value, grad_x_bdim) = unwrapTensorAtLevel(grad_x, cur_level);
- Tensor grad_glu_value;
- optional<int64_t> grad_glu_bdim;
- std::tie(grad_glu_value, grad_glu_bdim) = unwrapTensorAtLevel(grad_glu, cur_level);
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor dgrad_glu_value;
- optional<int64_t> dgrad_glu_bdim;
- std::tie(dgrad_glu_value, dgrad_glu_bdim) = unwrapTensorAtLevel(dgrad_glu, cur_level);
- Tensor dx_value;
- optional<int64_t> dx_bdim;
- std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
- auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardsigmoid::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardsigmoid_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardsigmoid_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardtanh::call(self, min_val, max_val);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, min_val, max_val);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardtanh_::call(self, min_val, max_val);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, min_val, max_val);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardswish_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardswish::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & hardswish__generated_plumbing(at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardswish_::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::hardswish_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::leaky_relu::call(self, negative_slope);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, negative_slope);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::leaky_relu_::call(self, negative_slope);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, negative_slope);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_sigmoid::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_sigmoid_forward::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(buffer, cur_level)) {
- return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor buffer_value;
- optional<int64_t> buffer_bdim;
- std::tie(buffer_value, buffer_bdim) = unwrapTensorAtLevel(buffer, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rrelu_with_noise_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
- return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor noise_value;
- optional<int64_t> noise_bdim;
- std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
- auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
- return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor noise_value;
- optional<int64_t> noise_bdim;
- std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor & rrelu_with_noise__generated_plumbing(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
- return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor noise_value;
- optional<int64_t> noise_bdim;
- std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
- batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
- return self;
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::softplus::call(self, beta, threshold);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, beta, threshold);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::softshrink::call(self, lambd);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, lambd);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::softshrink_backward::call(grad_output, self, lambd);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adaptive_avg_pool2d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adaptive_avg_pool3d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adaptive_max_pool2d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::adaptive_max_pool3d::call(self, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
- return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor random_samples_value;
- optional<int64_t> random_samples_bdim;
- std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
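The tuple-returning plumbing (fractional_max_pool2d above, fractional_max_pool3d and the max_pool*_with_indices variants below) uses the same convention, flattened: the rule returns (Tensor, bdim, Tensor, bdim), which the plumbing re-wraps pairwise via std::get<0> through std::get<3>. A hypothetical sketch follows, using at::cummax as a stand-in because its two outputs tolerate a leading batch dimension; the real pooling rules must also reshape around rank limits, which is omitted here.

#include <tuple>
#include <ATen/ATen.h>
#include <c10/util/Optional.h>

// Hypothetical sketch, not from the removed file: a two-output batch rule
// shaped to match the flattened tuple the plumbing unpacks.
std::tuple<at::Tensor, c10::optional<int64_t>,
           at::Tensor, c10::optional<int64_t>>
cummax_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim,
                  int64_t dim) {
  auto self_ = self_bdim.has_value() ? self.movedim(*self_bdim, 0) : self;
  // With the batch dim moved to the front, a non-negative logical dim
  // shifts right by one; negative dims already count from the end.
  auto dim_ = (self_bdim.has_value() && dim >= 0) ? dim + 1 : dim;
  auto result = at::cummax(self_, dim_);
  c10::optional<int64_t> out_bdim =
      self_bdim.has_value() ? c10::optional<int64_t>(0) : c10::nullopt;
  return std::make_tuple(std::get<0>(result), out_bdim,
                         std::get<1>(result), out_bdim);
}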
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
- return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor random_samples_value;
- optional<int64_t> random_samples_bdim;
- std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::max_unpool2d::call(self, indices, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reflection_pad1d::call(self, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reflection_pad2d::call(self, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reflection_pad3d::call(self, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::replication_pad1d::call(self, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::replication_pad2d::call(self, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::replication_pad3d::call(self, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_pad_circular::call(self, pad);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, pad);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_pad_enum::call(self, pad, mode, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, pad, mode, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::pad::call(self, pad, mode, value);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, pad, mode, value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level)) {
- return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_nearest1d::call(self, output_size, scales);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, scales);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, scales);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level)) {
- return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
- return at::_ops::sigmoid_backward::call(grad_output, output);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logit_backward::call(grad_output, self, eps);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
- return at::_ops::tanh_backward::call(grad_output, output);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
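- // Optional tensor arguments (here: bias) are only unwrapped when present;
- // otherwise bias_value/bias_bdim stay nullopt and the batch rule is expected
- // to handle the missing-bias case itself.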
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
- return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
- }
- Tensor grad_output_value;
- optional<int64_t> grad_output_bdim;
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
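- // Multi-output ops: the batch rule returns its (tensor, bdim) pairs
- // interleaved in one tuple, so indices 0/1, 2/3, and 4/5 are re-wrapped here
- // into the three tensors of the std::tuple return type.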
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
- return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor weight_value;
- optional<int64_t> weight_bdim;
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor column_stack_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::column_stack::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
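- // TensorList arguments are handed to the batch rule wholesale rather than
- // being unwrapped here, leaving per-tensor unwrapping to the rule itself.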
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::isfinite::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::isinf::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::record_stream::call(self, s);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, s);
- }
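- // Ops with no return value use the "gen_vmap_plumbing_no_returns" escape
- // check and invoke the batch rule purely for its side effect; there is no
- // result to re-wrap with makeBatched.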
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::isposinf::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::isneginf::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_add_batch_dim::call(self, batch_dim, level);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, batch_dim, level);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_entr::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_ndtri::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_log_ndtr::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_expm1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_exp2::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_psi_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_psi::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_digamma::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_gammaln::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_erf_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_erf::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_erfc::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_erfcx::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_erfinv::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_ndtr::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_xlog1py::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_xlog1py_self_scalar::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
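- // Scalar overloads (*_self_scalar / *_other_scalar): a Scalar cannot carry a
- // batch dim, so it is forwarded to the batch rule as-is and only the tensor
- // operand is unwrapped.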
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_xlog1py_other_scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_xlogy::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_xlogy_self_scalar::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_xlogy_other_scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_zeta::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_zeta_self_scalar::call(self, other);
- }
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_zeta_other_scalar::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, other);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_i0_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_i0::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_i0e::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_i1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_i1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_i1e::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_logit::call(self, eps);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, eps);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_polygamma::call(n, self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(n, self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_logsumexp::call(self, dim, keepdim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, keepdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_expit_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_expit::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_sinc::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_round::call(self, decimals);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, decimals);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_log1p::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_log_softmax::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_gammainc::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::special_gammaincc::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_multigammaln::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_softmax::call(self, dim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_fft::call(self, n, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ifft::call(self, n, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_rfft::call(self, n, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_irfft::call(self, n, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_hfft::call(self, n, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ihfft::call(self, n, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_fft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_fft2::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ifft2::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_rfft2::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_irfft2::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_hfft2::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ihfft2::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_fftn::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ifftn::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_rfftn::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_irfftn::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_hfftn::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ihfftn::call(self, s, dim, norm);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, s, dim, norm);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_fftshift::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::fft_ifftshift::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, upper, check_errors);
- // Multi-output ops: the batch rule returns interleaved (value, batch dim)
- // pairs, so each output is re-wrapped from its own pair.
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
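- // Hedged sketch of a matching two-output batch rule: outputs come back as
- // interleaved (value, batch dim) pairs, mirroring the make_tuple re-wrap
- // above. Names are illustrative; linalg_cholesky_ex already broadcasts over
- // leading batch dims, so moving the batch dim to the front suffices.
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>, at::Tensor, c10::optional<int64_t>>
- //   cholesky_ex_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim,
- //                          bool upper, bool check_errors) {
- //     auto self_ = moveBatchDimToFront(self, self_bdim);
- //     auto [L, info] = at::linalg_cholesky_ex(self_, upper, check_errors);
- //     return std::make_tuple(L, 0, info, 0);
- //   }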
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_cholesky::call(self, upper);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, upper);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::linalg_cross::call(self, other, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_lu_factor::call(A, pivot);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, pivot);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, pivot, check_errors);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_lu::call(A, pivot);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, pivot);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
- return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
- }
- Tensor LU_value;
- optional<int64_t> LU_bdim;
- std::tie(LU_value, LU_bdim) = unwrapTensorAtLevel(LU, cur_level);
- Tensor pivots_value;
- optional<int64_t> pivots_bdim;
- std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
- Tensor B_value;
- optional<int64_t> B_bdim;
- std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
- auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det_generated_plumbing(const at::Tensor & A) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::_linalg_det::call(A);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_det::call(A);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor det_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::det::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, hermitian, check_errors);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_ldl_factor::call(self, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, hermitian);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
- return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
- }
- Tensor LD_value;
- optional<int64_t> LD_bdim;
- std::tie(LD_value, LD_bdim) = unwrapTensorAtLevel(LD, cur_level);
- Tensor pivots_value;
- optional<int64_t> pivots_bdim;
- std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
- Tensor B_value;
- optional<int64_t> B_bdim;
- std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
- auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) {
- return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor b_value;
- optional<int64_t> b_bdim;
- std::tie(b_value, b_bdim) = unwrapTensorAtLevel(b, cur_level);
- auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::linalg_matmul::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) {
- return at::_ops::linalg_vecdot::call(x, y, dim);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor y_value;
- optional<int64_t> y_bdim;
- std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
- auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_matrix_exp::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet_generated_plumbing(const at::Tensor & A) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::_linalg_slogdet::call(A);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_slogdet_generated_plumbing(const at::Tensor & A) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_slogdet::call(A);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> slogdet_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::slogdet::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor logdet_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::logdet::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_eig_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_eig::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_eigvals::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, UPLO, compute_v);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_eigh::call(self, UPLO);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, UPLO);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_eigvalsh::call(self, UPLO);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, UPLO);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor & input, const at::Tensor & tau) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) {
- return at::_ops::linalg_householder_product::call(input, tau);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor tau_value;
- optional<int64_t> tau_bdim;
- std::tie(tau_value, tau_bdim) = unwrapTensorAtLevel(tau, cur_level);
- auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_inv_ex::call(A, check_errors);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, check_errors);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_inv::call(A);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor inverse_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::inverse::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::inner::call(self, other);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
- return at::_ops::outer::call(self, vec2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor vec2_value;
- optional<int64_t> vec2_bdim;
- std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
- auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
- return at::_ops::ger::call(self, vec2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor vec2_value;
- optional<int64_t> vec2_bdim;
- std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
- auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_svd::call(A, full_matrices, driver);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, full_matrices, driver);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, c10::optional<c10::string_view> driver) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_svdvals::call(A, driver);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, driver);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_cond::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_cond_p_str::call(self, p);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
- return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> atol_value;
- optional<int64_t> atol_bdim;
- // Optional tensor arguments are unwrapped only when present; when absent,
- // nullopt flows through for both the value and its batch dim.
- if (atol) {
- std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
- }
- optional<Tensor> rtol_value;
- optional<int64_t> rtol_bdim;
- if (rtol) {
- std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
- }
- auto results = batch_rule(self_value, self_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
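- // For reference, the calling convention the rule sees for optional tensor
- // arguments (signature shape only; the name is illustrative):
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>>
- //   pinv_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim,
- //                   const c10::optional<at::Tensor>& atol, c10::optional<int64_t> atol_bdim,
- //                   const c10::optional<at::Tensor>& rtol, c10::optional<int64_t> rtol_bdim,
- //                   bool hermitian);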
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_pinv::call(self, rcond, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, rcond, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) {
- return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor rcond_value;
- optional<int64_t> rcond_bdim;
- std::tie(rcond_value, rcond_bdim) = unwrapTensorAtLevel(rcond, cur_level);
- auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
- return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- Tensor B_value;
- optional<int64_t> B_bdim;
- std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
- auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
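- // Multi-output ops return one flat tuple that interleaves each output with
- // its batch dim. _linalg_solve_ex yields (result, LU, pivots, info), so the
- // corresponding batch rule's return type is
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>,   // result
- //              at::Tensor, c10::optional<int64_t>,   // LU
- //              at::Tensor, c10::optional<int64_t>,   // pivots
- //              at::Tensor, c10::optional<int64_t>>   // info
- //
- // and the plumbing re-wraps the pairs (std::get<2k>, std::get<2k+1>) with
- // makeBatched, as in the return statement above.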
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
- return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- Tensor B_value;
- optional<int64_t> B_bdim;
- std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
- auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
- return at::_ops::linalg_solve::call(A, B, left);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- Tensor B_value;
- optional<int64_t> B_bdim;
- std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
- auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_tensorinv::call(self, ind);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, ind);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::linalg_tensorsolve::call(self, other, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> linalg_qr_generated_plumbing(const at::Tensor & A, c10::string_view mode) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(A, cur_level)) {
- return at::_ops::linalg_qr::call(A, mode);
- }
- Tensor A_value;
- optional<int64_t> A_bdim;
- std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
- auto results = batch_rule(A_value, A_bdim, mode);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_matrix_power::call(self, n);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
- return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- optional<Tensor> atol_value;
- optional<int64_t> atol_bdim;
- if (atol) {
- std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
- }
- optional<Tensor> rtol_value;
- optional<int64_t> rtol_bdim;
- if (rtol) {
- std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, tol, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) {
- return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor tol_value;
- optional<int64_t> tol_bdim;
- std::tie(tol_value, tol_bdim) = unwrapTensorAtLevel(tol, cur_level);
- auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::linalg_multi_dot::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
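- // TensorList arguments are not unwrapped by the plumbing: the list is
- // forwarded as-is (note the absence of unwrapTensorAtLevel above), and the
- // batch rule for a list op is responsible for unwrapping each element at
- // the current level itself.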
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
- return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor other_value;
- optional<int64_t> other_bdim;
- std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
- auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_test_optional_intlist::call(values, addends);
- }
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(values_value, values_bdim, addends);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_test_optional_filled_intlist::call(values, addends);
- }
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(values_value, values_bdim, addends);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_test_optional_floatlist::call(values, addends);
- }
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(values_value, values_bdim, addends);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(dummy, cur_level)) {
- return at::_ops::_test_string_default::call(dummy, a, b);
- }
- Tensor dummy_value;
- optional<int64_t> dummy_bdim;
- std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
- auto results = batch_rule(dummy_value, dummy_bdim, a, b);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(dummy, cur_level)) {
- return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
- }
- Tensor dummy_value;
- optional<int64_t> dummy_bdim;
- std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
- auto results = batch_rule(dummy_value, dummy_bdim, a, b);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(dummy, cur_level)) {
- return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
- }
- Tensor dummy_value;
- optional<int64_t> dummy_bdim;
- std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
- auto results = batch_rule(dummy_value, dummy_bdim, a, b);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_test_warn_in_autograd::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, b);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
- return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
- }
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- optional<Tensor> lengths_value;
- optional<int64_t> lengths_bdim;
- if (lengths) {
- std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
- }
- optional<Tensor> indices_value;
- optional<int64_t> indices_bdim;
- if (indices) {
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level);
- }
- optional<Tensor> offsets_value;
- optional<int64_t> offsets_bdim;
- if (offsets) {
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
- }
- auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
- return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial);
- }
- Tensor grad_value;
- optional<int64_t> grad_bdim;
- std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor data_value;
- optional<int64_t> data_bdim;
- std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
- optional<Tensor> lengths_value;
- optional<int64_t> lengths_bdim;
- if (lengths) {
- std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
- }
- optional<Tensor> offsets_value;
- optional<int64_t> offsets_bdim;
- if (offsets) {
- std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
- }
- auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(sequences, cur_level)) {
- return at::_ops::pad_sequence::call(sequences, batch_first, padding_value);
- }
- auto results = batch_rule(sequences, batch_first, padding_value);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::flatten_dense_tensors::call(tensors);
- }
- auto results = batch_rule(tensors);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) {
- return at::_ops::unflatten_dense_tensors::call(flat, tensors);
- }
- Tensor flat_value;
- optional<int64_t> flat_bdim;
- std::tie(flat_value, flat_bdim) = unwrapTensorAtLevel(flat, cur_level);
- auto results = batch_rule(flat_value, flat_bdim, tensors);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
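- // Ops returning ::std::vector<at::Tensor> are re-wrapped with
- // makeBatchedVector, the element-wise analogue of makeBatched: the batch
- // rule yields a (std::vector<at::Tensor>, c10::optional<int64_t>) pair and
- // every element of the vector shares that single batch dim.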
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(list, cur_level)) {
- return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory);
- }
- auto results = batch_rule(list, dtype, layout, device, pin_memory);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_fw_primal_copy::call(self, level);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, level);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
- return at::_ops::_make_dual_copy::call(primal, tangent, level);
- }
- Tensor primal_value;
- optional<int64_t> primal_bdim;
- std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
- Tensor tangent_value;
- optional<int64_t> tangent_bdim;
- std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
- auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_as_real_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_as_complex_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_conj_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_neg_view_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_sparse_broadcast_to_copy::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::expand_copy::call(self, size, implicit);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, implicit);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::permute_copy::call(self, dims);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dims);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_reshape_alias_copy::call(self, size, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, stride);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::select_copy_int::call(self, dim, index);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, index);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
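- // Non-tensor arguments (the int64_t dim and c10::SymInt index here, and the
- // scalars, strings, and array refs elsewhere) carry no batch dim and are
- // forwarded to the batch rule untouched; only Tensor and optional<Tensor>
- // arguments are unwrapped into (value, bdim) pairs.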
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::detach_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::split_copy_Tensor::call(self, split_size, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_size, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze_copy_dim::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::squeeze_copy_dims::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor t_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::t_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::transpose_copy_int::call(self, dim0, dim1);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim0, dim1);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unsqueeze_copy::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_indices_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_values_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::indices_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor values_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::values_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::crow_indices_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::col_indices_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::ccol_indices_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::row_indices_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unbind_copy_int::call(self, dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dim);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::unbind_copy_int_out::call(self, dim, out);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, dim, out);
- }
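- // out= variants return void: the escape check uses the
- // "gen_vmap_plumbing_no_returns" tag, the out TensorList is forwarded
- // unmodified for the rule to write into, and nothing is re-wrapped because
- // the results are produced in place.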
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, split_size, dim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, split_sizes, dim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_copy::call(self, size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::view_copy_dtype::call(self, dtype);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dtype);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::unfold_copy::call(self, dimension, size, step);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, dimension, size, step);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::alias_copy::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
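- // The *_copy view ops above (view_copy, view_copy.dtype, unfold_copy, alias_copy) are all
- // instances of the single-tensor pattern; non-tensor arguments (size, dtype, dimension,
- // step) carry no batch dimension and are forwarded to the batch rule unchanged.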
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::to_padded_tensor::call(self, padding, output_size);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, padding, output_size);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) {
- return at::_ops::_nested_tensor_softmax_with_shape::call(self, query);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- auto results = batch_rule(self_value, self_bdim, query_value, query_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
- }
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- Tensor qkv_weight_value;
- optional<int64_t> qkv_weight_bdim;
- std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
- Tensor qkv_bias_value;
- optional<int64_t> qkv_bias_bdim;
- std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
- Tensor proj_weight_value;
- optional<int64_t> proj_weight_bdim;
- std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
- Tensor proj_bias_value;
- optional<int64_t> proj_bias_bdim;
- std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
- Tensor norm_weight_1_value;
- optional<int64_t> norm_weight_1_bdim;
- std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
- Tensor norm_bias_1_value;
- optional<int64_t> norm_bias_1_bdim;
- std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
- Tensor norm_weight_2_value;
- optional<int64_t> norm_weight_2_bdim;
- std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
- Tensor norm_bias_2_value;
- optional<int64_t> norm_bias_2_bdim;
- std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
- Tensor ffn_weight_1_value;
- optional<int64_t> ffn_weight_1_bdim;
- std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
- Tensor ffn_bias_1_value;
- optional<int64_t> ffn_bias_1_bdim;
- std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
- Tensor ffn_weight_2_value;
- optional<int64_t> ffn_weight_2_bdim;
- std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
- Tensor ffn_bias_2_value;
- optional<int64_t> ffn_bias_2_bdim;
- std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
- optional<Tensor> mask_value;
- optional<int64_t> mask_bdim;
- if (mask) {
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
- }
- auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
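- // Optional tensor arguments such as `mask` are unwrapped only when present; otherwise the
- // corresponding value/bdim pair stays nullopt and is forwarded as-is, so batch rules for
- // these ops must accept optional<Tensor>/optional<int64_t> for every optional operand.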
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor qkv_weight_value;
- optional<int64_t> qkv_weight_bdim;
- std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
- Tensor qkv_bias_value;
- optional<int64_t> qkv_bias_bdim;
- std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
- Tensor proj_weight_value;
- optional<int64_t> proj_weight_bdim;
- std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
- Tensor proj_bias_value;
- optional<int64_t> proj_bias_bdim;
- std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
- optional<Tensor> mask_value;
- optional<int64_t> mask_bdim;
- if (mask) {
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
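- // Multi-output ops rewrap each result separately: the batch rule returns an interleaved
- // tuple (out0, bdim0, out1, bdim1, ...), so a two-output op like this one reads
- // std::get<0>..std::get<3> and wraps two batched tensors at the current level.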
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
- return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- optional<Tensor> attn_mask_value;
- optional<int64_t> attn_mask_bdim;
- if (attn_mask) {
- std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
- return at::_ops::_scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- optional<Tensor> attn_mask_value;
- optional<int64_t> attn_mask_bdim;
- if (attn_mask) {
- std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, need_attn_weights, is_causal);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) {
- return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- optional<Tensor> attn_mask_value;
- optional<int64_t> attn_mask_bdim;
- if (attn_mask) {
- std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
- }
- optional<Tensor> dropout_mask_value;
- optional<int64_t> dropout_mask_bdim;
- if (dropout_mask) {
- std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
- return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor out_value;
- optional<int64_t> out_bdim;
- std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
- Tensor logsumexp_value;
- optional<int64_t> logsumexp_bdim;
- std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
- Tensor cum_seq_q_value;
- optional<int64_t> cum_seq_q_bdim;
- std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
- Tensor cum_seq_k_value;
- optional<int64_t> cum_seq_k_bdim;
- std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
- auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
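- // Backward plumbing is the same pattern at larger scale: all eight tensor inputs are
- // unwrapped, scalar metadata (max_q, max_k, dropout_p, philox_seed, philox_offset) is
- // forwarded untouched, and the three gradient outputs are rewrapped individually.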
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool compute_log_sumexp, bool is_causal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) {
- return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, compute_log_sumexp, is_causal);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, compute_log_sumexp, is_causal);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level)) {
- return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
- }
- Tensor grad_out__value;
- optional<int64_t> grad_out__bdim;
- std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor out_value;
- optional<int64_t> out_bdim;
- std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
- Tensor logsumexp_value;
- optional<int64_t> logsumexp_bdim;
- std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
- auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, is_causal, chunk_grad_outputs);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
- return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
- }
- Tensor grad_out_value;
- optional<int64_t> grad_out_bdim;
- std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor out_value;
- optional<int64_t> out_bdim;
- std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
- Tensor logsumexp_value;
- optional<int64_t> logsumexp_bdim;
- std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
- Tensor cum_seq_q_value;
- optional<int64_t> cum_seq_q_bdim;
- std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
- Tensor cum_seq_k_value;
- optional<int64_t> cum_seq_k_bdim;
- std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
- auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp, bool causal) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level)) {
- return at::_ops::_efficient_attention_forward::call(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- optional<Tensor> cu_seqlens_q_value;
- optional<int64_t> cu_seqlens_q_bdim;
- if (cu_seqlens_q) {
- std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level);
- }
- optional<Tensor> cu_seqlens_k_value;
- optional<int64_t> cu_seqlens_k_bdim;
- if (cu_seqlens_k) {
- std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, compute_log_sumexp, causal);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level)) {
- return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
- }
- Tensor grad_out__value;
- optional<int64_t> grad_out__bdim;
- std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor out_value;
- optional<int64_t> out_bdim;
- std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
- Tensor logsumexp_value;
- optional<int64_t> logsumexp_bdim;
- std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
- auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, is_causal, chunk_grad_outputs);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) {
- return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
- }
- Tensor q_value;
- optional<int64_t> q_bdim;
- std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
- Tensor k_value;
- optional<int64_t> k_bdim;
- std::tie(k_value, k_bdim) = unwrapTensorAtLevel(k, cur_level);
- Tensor v_value;
- optional<int64_t> v_bdim;
- std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
- auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
- return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor qkv_weight_value;
- optional<int64_t> qkv_weight_bdim;
- std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
- Tensor qkv_bias_value;
- optional<int64_t> qkv_bias_bdim;
- std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
- Tensor proj_weight_value;
- optional<int64_t> proj_weight_bdim;
- std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
- Tensor proj_bias_value;
- optional<int64_t> proj_bias_bdim;
- std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
- optional<Tensor> mask_value;
- optional<int64_t> mask_bdim;
- if (mask) {
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_airy_ai::call(x);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
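- // The special-function wrappers (special_airy_ai above and the special_bessel_* ops
- // below) instantiate the minimal unary form of the pattern: one unwrap, one batch-rule
- // call, one makeBatched rewrap.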
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(incr_key, cur_level) && !isBatchedAtLevel(incr_value, cur_level)) {
- return at::_ops::_transformer_decoder_only_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
- }
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- Tensor qkv_weight_value;
- optional<int64_t> qkv_weight_bdim;
- std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
- Tensor qkv_bias_value;
- optional<int64_t> qkv_bias_bdim;
- std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
- Tensor proj_weight_value;
- optional<int64_t> proj_weight_bdim;
- std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
- Tensor proj_bias_value;
- optional<int64_t> proj_bias_bdim;
- std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
- Tensor norm_weight_1_value;
- optional<int64_t> norm_weight_1_bdim;
- std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
- Tensor norm_bias_1_value;
- optional<int64_t> norm_bias_1_bdim;
- std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
- Tensor norm_weight_2_value;
- optional<int64_t> norm_weight_2_bdim;
- std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
- Tensor norm_bias_2_value;
- optional<int64_t> norm_bias_2_bdim;
- std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
- Tensor ffn_weight_1_value;
- optional<int64_t> ffn_weight_1_bdim;
- std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
- Tensor ffn_bias_1_value;
- optional<int64_t> ffn_bias_1_bdim;
- std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
- Tensor ffn_weight_2_value;
- optional<int64_t> ffn_weight_2_bdim;
- std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
- Tensor ffn_bias_2_value;
- optional<int64_t> ffn_bias_2_bdim;
- std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
- optional<Tensor> mask_value;
- optional<int64_t> mask_bdim;
- if (mask) {
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
- }
- optional<Tensor> incr_key_value;
- optional<int64_t> incr_key_bdim;
- if (incr_key) {
- std::tie(incr_key_value, incr_key_bdim) = unwrapTensorAtLevel(incr_key.value(), cur_level);
- }
- optional<Tensor> incr_value_value;
- optional<int64_t> incr_value_bdim;
- if (incr_value) {
- std::tie(incr_value_value, incr_value_bdim) = unwrapTensorAtLevel(incr_value.value(), cur_level);
- }
- auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, incr_key_value, incr_key_bdim, incr_value_value, incr_value_bdim);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_decoder_only_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(incr_key, cur_level) && !isBatchedAtLevel(incr_value, cur_level)) {
- return at::_ops::_native_decoder_only_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
- }
- Tensor query_value;
- optional<int64_t> query_bdim;
- std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
- Tensor key_value;
- optional<int64_t> key_bdim;
- std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
- Tensor value_value;
- optional<int64_t> value_bdim;
- std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
- Tensor qkv_weight_value;
- optional<int64_t> qkv_weight_bdim;
- std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
- Tensor qkv_bias_value;
- optional<int64_t> qkv_bias_bdim;
- std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
- Tensor proj_weight_value;
- optional<int64_t> proj_weight_bdim;
- std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
- Tensor proj_bias_value;
- optional<int64_t> proj_bias_bdim;
- std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
- optional<Tensor> mask_value;
- optional<int64_t> mask_bdim;
- if (mask) {
- std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
- }
- optional<Tensor> incr_key_value;
- optional<int64_t> incr_key_bdim;
- if (incr_key) {
- std::tie(incr_key_value, incr_key_bdim) = unwrapTensorAtLevel(incr_key.value(), cur_level);
- }
- optional<Tensor> incr_value_value;
- optional<int64_t> incr_value_bdim;
- if (incr_value) {
- std::tie(incr_value_value, incr_value_bdim) = unwrapTensorAtLevel(incr_value.value(), cur_level);
- }
- auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, incr_key_value, incr_key_bdim, incr_value_value, incr_value_bdim, need_weights, average_attn_weights);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_bessel_j0::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_bessel_j1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_bessel_y0::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_bessel_y1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_t::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
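- // Scalar overloads guard on and unwrap only their tensor operand: the x_scalar variant
- // checks `n` alone and passes the Scalar `x` straight through, while the n_scalar variant
- // does the reverse. A Scalar carries no batch dimension, so no bdim accompanies it.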
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_u::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_v::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_w::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_hermite_polynomial_h::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_hermite_polynomial_he::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_laguerre_polynomial_l::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_legendre_polynomial_p::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
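- // Unary variants: a single tensor operand is unwrapped, so the batch rule
- // receives exactly one (value, bdim) pair.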
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_modified_bessel_i0::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_modified_bessel_i1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_modified_bessel_k0::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::special_modified_bessel_k1::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_scaled_modified_bessel_k0::call(x);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_scaled_modified_bessel_k1::call(x);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(n, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
- }
- Tensor n_value;
- optional<int64_t> n_bdim;
- std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
- auto results = batch_rule(x, n_value, n_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim, n);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(x, cur_level)) {
- return at::_ops::special_spherical_bessel_j0::call(x);
- }
- Tensor x_value;
- optional<int64_t> x_bdim;
- std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
- auto results = batch_rule(x_value, x_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
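- // Judging from how `results` is consumed above (std::get<0>/std::get<1>
- // fed to makeBatched), a compatible batch rule for a pointwise unary op
- // such as special_spherical_bessel_j0 would plausibly look like the
- // following sketch (illustrative only; the real rules are registered
- // elsewhere and may move the batch dim before computing):
- //
- //   std::tuple<at::Tensor, c10::optional<int64_t>> j0_batch_rule(
- //       const at::Tensor& x, c10::optional<int64_t> x_bdim) {
- //     // pointwise op: apply directly and propagate the batch dim, if any
- //     return std::make_tuple(at::special_spherical_bessel_j0(x), x_bdim);
- //   }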
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foobar::call(self, arg1, arg2, arg3);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
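- // In-place / no-return variants: these pass "gen_vmap_plumbing_no_returns"
- // to vmap_check_escaped, unwrap optional tensors only when they hold a
- // value, and call the batch rule purely for its side effects; there is no
- // result tuple to re-wrap.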
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
- return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
- }
- optional<Tensor> grad_scale_value;
- optional<int64_t> grad_scale_bdim;
- if (grad_scale) {
- std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
- }
- optional<Tensor> found_inf_value;
- optional<int64_t> found_inf_bdim;
- if (found_inf) {
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
- }
- batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
- return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
- }
- optional<Tensor> grad_scale_value;
- optional<int64_t> grad_scale_bdim;
- if (grad_scale) {
- std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
- }
- optional<Tensor> found_inf_value;
- optional<int64_t> found_inf_bdim;
- if (found_inf) {
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
- }
- batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
- return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_buf_value;
- optional<int64_t> weight_buf_bdim;
- std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor reserve_value;
- optional<int64_t> reserve_bdim;
- std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
- Tensor out0_value;
- optional<int64_t> out0_bdim;
- std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
- Tensor out1_value;
- optional<int64_t> out1_bdim;
- std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
- Tensor out2_value;
- optional<int64_t> out2_bdim;
- std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
- optional<Tensor> cx_value;
- optional<int64_t> cx_bdim;
- if (cx) {
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
- }
- optional<Tensor> grad_output_value;
- optional<int64_t> grad_output_bdim;
- if (grad_output) {
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
- }
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- optional<Tensor> dropout_state_value;
- optional<int64_t> dropout_state_bdim;
- if (dropout_state) {
- std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
- }
- batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
- }
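- // Out-variant plumbing: the `out0`..`out2` destination tensors are unwrapped
- // to (value, bdim) pairs like any other operand, while TensorList arguments
- // such as `weight` and `out3` are forwarded to the batch rule as-is.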
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
- return at::_ops::bernoulli_Tensor::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor p_value;
- optional<int64_t> p_bdim;
- std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
- auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
- return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor indices_value;
- optional<int64_t> indices_bdim;
- std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor resize_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::resize::call(self, size, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_resize_output::call(self, size, device);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, device);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
- return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor values_value;
- optional<int64_t> values_bdim;
- std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
- auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
- return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor weight_buf_value;
- optional<int64_t> weight_buf_bdim;
- std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
- Tensor hx_value;
- optional<int64_t> hx_bdim;
- std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
- Tensor output_value;
- optional<int64_t> output_bdim;
- std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
- Tensor reserve_value;
- optional<int64_t> reserve_bdim;
- std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
- Tensor out0_value;
- optional<int64_t> out0_bdim;
- std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
- Tensor out1_value;
- optional<int64_t> out1_bdim;
- std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
- Tensor out2_value;
- optional<int64_t> out2_bdim;
- std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
- optional<Tensor> cx_value;
- optional<int64_t> cx_bdim;
- if (cx) {
- std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
- }
- optional<Tensor> grad_output_value;
- optional<int64_t> grad_output_bdim;
- if (grad_output) {
- std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
- }
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- optional<Tensor> dropout_state_value;
- optional<int64_t> dropout_state_bdim;
- if (dropout_state) {
- std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
- }
- batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
- return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
- }
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor running_mean_value;
- optional<int64_t> running_mean_bdim;
- std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level);
- Tensor running_var_value;
- optional<int64_t> running_var_bdim;
- std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- optional<Tensor> bias_value;
- optional<int64_t> bias_bdim;
- if (bias) {
- std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
- }
- auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
- }
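- // Multi-return plumbing: the batch rule yields a flattened sequence of
- // (value, bdim) pairs; each pair is re-wrapped with makeBatched and packed
- // back into the std::tuple the op is declared to return.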
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, split_size, dim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- batch_rule(self_value, self_bdim, split_sizes, dim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
- return at::_ops::resize_as::call(self, the_template, memory_format);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor the_template_value;
- optional<int64_t> the_template_bdim;
- std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
- auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
- return at::_ops::resize_as_sparse::call(self, the_template);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor the_template_value;
- optional<int64_t> the_template_bdim;
- std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
- auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor zero_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::zero::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_coalesced::call(self, coalesced);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, coalesced);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
- return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor src_value;
- optional<int64_t> src_bdim;
- std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
- auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
- }
- Tensor scales_value;
- optional<int64_t> scales_bdim;
- std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
- Tensor zero_points_value;
- optional<int64_t> zero_points_bdim;
- std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
- batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::dequantize_tensors_out::call(tensors, out);
- }
- batch_rule(tensors, out);
- }
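- // Degenerate case: when every operand is a TensorList, nothing is unwrapped
- // and the lists are handed to the batch rule directly.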
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
- return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor observer_on_value;
- optional<int64_t> observer_on_bdim;
- std::tie(observer_on_value, observer_on_bdim) = unwrapTensorAtLevel(observer_on, cur_level);
- Tensor fake_quant_on_value;
- optional<int64_t> fake_quant_on_bdim;
- std::tie(fake_quant_on_value, fake_quant_on_bdim) = unwrapTensorAtLevel(fake_quant_on, cur_level);
- Tensor running_min_value;
- optional<int64_t> running_min_bdim;
- std::tie(running_min_value, running_min_bdim) = unwrapTensorAtLevel(running_min, cur_level);
- Tensor running_max_value;
- optional<int64_t> running_max_bdim;
- std::tie(running_max_value, running_max_bdim) = unwrapTensorAtLevel(running_max, cur_level);
- Tensor scale_value;
- optional<int64_t> scale_bdim;
- std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
- Tensor zero_point_value;
- optional<int64_t> zero_point_bdim;
- std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
- auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
- }
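// For multi-output functional ops like the helper above, the batch rule
// returns one flattened (value, bdim) pair per output, so a six-tensor
// result occupies tuple slots 0..11 and is re-wrapped pairwise with
// makeBatched(std::get<2k>(results), std::get<2k+1>(results), cur_level).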
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void lstm_mps_backward_out_generated_plumbing(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) {
- return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
- }
- Tensor grad_y_value;
- optional<int64_t> grad_y_bdim;
- std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y, cur_level);
- Tensor z_state_value;
- optional<int64_t> z_state_bdim;
- std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
- Tensor cell_state_fwd_value;
- optional<int64_t> cell_state_fwd_bdim;
- std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
- Tensor input_value;
- optional<int64_t> input_bdim;
- std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
- Tensor layersOutputs_value;
- optional<int64_t> layersOutputs_bdim;
- std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level);
- Tensor out0_value;
- optional<int64_t> out0_bdim;
- std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
- optional<Tensor> grad_hy_value;
- optional<int64_t> grad_hy_bdim;
- if (grad_hy) {
- std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
- }
- optional<Tensor> grad_cy_value;
- optional<int64_t> grad_cy_bdim;
- if (grad_cy) {
- std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
- }
- batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_value, out0_bdim, out1, out2);
- }
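// Optional tensor arguments (grad_hy and grad_cy above) are unwrapped only
// when present; otherwise their (value, bdim) slots stay nullopt.
// TensorList arguments such as hx, params, out1, and out2 are forwarded
// as-is, leaving per-element handling to the batch rule itself.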
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::set_source_Storage::call(self, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, source);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
- return at::_ops::set_source_Tensor::call(self, source);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor source_value;
- optional<int64_t> source_bdim;
- std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
- auto results = batch_rule(self_value, self_bdim, source_value, source_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor set_generated_plumbing(const at::Tensor & self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::set::call(self);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
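// The set_/random_/uniform_/cauchy_/log_normal_/exponential_/geometric_
// wrappers in this stretch are the standard single-tensor case: unwrap
// self, forward the non-tensor arguments (Storage, integer bounds,
// Generator, distribution parameters) untouched, and re-wrap the single
// returned tensor.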
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::random_from::call(self, from, to, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, from, to, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::random_to::call(self, to, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, to, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor random_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::random::call(self, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::uniform::call(self, from, to, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, from, to, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::cauchy::call(self, median, sigma, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, median, sigma, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::log_normal::call(self, mean, std, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, mean, std, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::exponential::call(self, lambd, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, lambd, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::geometric::call(self, p, generator);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- auto results = batch_rule(self_value, self_bdim, p, generator);
- return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- optional<Tensor> weight_value;
- optional<int64_t> weight_bdim;
- if (weight) {
- std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
- }
- batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
- }
- Tensor found_inf_value;
- optional<int64_t> found_inf_bdim;
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
- Tensor inv_scale_value;
- optional<int64_t> inv_scale_bdim;
- std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
- batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
- return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
- }
- Tensor found_inf_value;
- optional<int64_t> found_inf_bdim;
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
- Tensor inv_scale_value;
- optional<int64_t> inv_scale_bdim;
- std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
- auto results = batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
- return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
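// When an op returns ::std::vector<at::Tensor>, the corresponding slot
// pair is re-wrapped with makeBatchedVector instead of makeBatched, as in
// _amp_foreach_non_finite_check_and_unscale above: the vector of values
// in slot 0 is paired with the single bdim in slot 1.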
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
- return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
- }
- Tensor self_value;
- optional<int64_t> self_bdim;
- std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
- Tensor growth_tracker_value;
- optional<int64_t> growth_tracker_bdim;
- std::tie(growth_tracker_value, growth_tracker_bdim) = unwrapTensorAtLevel(growth_tracker, cur_level);
- Tensor found_inf_value;
- optional<int64_t> found_inf_bdim;
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
- auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval);
- return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
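// The _foreach_*_out wrappers that follow all reduce to this minimal
// shape: every argument is a TensorList, Scalar, or ScalarList, so there
// is nothing to unwrap at this layer and the batch rule receives the
// arguments verbatim. A matching no-return rule would be declared along
// these lines (illustrative only, not part of this file):
//
//   void my_foreach_scalar_rule(at::TensorList self, const at::Scalar& scalar,
//                               at::TensorList out);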
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
- }
- batch_rule(self, scalar, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
- }
- batch_rule(self, other, alpha, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
- }
- batch_rule(self, other, alpha, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_mul_List_out::call(self, other, out);
- }
- batch_rule(self, other, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_div_List_out::call(self, other, out);
- }
- batch_rule(self, other, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
- }
- batch_rule(self, other, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
- }
- batch_rule(self, other, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_maximum_List_out::call(self, other, out);
- }
- batch_rule(self, other, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_minimum_List_out::call(self, other, out);
- }
- batch_rule(self, other, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
- }
- batch_rule(self, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_exp_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_zero_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::vector<at::Tensor> _foreach_zero_generated_plumbing(at::TensorList self) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level)) {
- return at::_ops::_foreach_zero::call(self);
- }
- auto results = batch_rule(self);
- return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sqrt_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_abs_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_acos_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_asin_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_atan_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_ceil_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_cos_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_cosh_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_erf_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_erfc_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_expm1_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_floor_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_log_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_log10_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_log1p_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_log2_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_neg_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_tan_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_tanh_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sin_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sinh_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_round_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_lgamma_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_frac_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_reciprocal_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_sigmoid_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_trunc_out::call(self, out);
- }
- batch_rule(self, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
- }
- batch_rule(self, tensor1, tensor2, value, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
- }
- batch_rule(self, tensor1, tensor2, value, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
- }
- batch_rule(self, tensor1, tensor2, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
- }
- Tensor scalars_value;
- optional<int64_t> scalars_bdim;
- std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
- batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
- }
- batch_rule(self, tensor1, tensor2, scalars, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
- }
- Tensor scalars_value;
- optional<int64_t> scalars_bdim;
- std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
- batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
- }
- batch_rule(self, ord, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
- }
- batch_rule(self, tensors1, weights, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
- }
- batch_rule(self, tensors1, weight, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
- }
- optional<Tensor> grad_scale_value;
- optional<int64_t> grad_scale_bdim;
- if (grad_scale) {
- std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
- }
- optional<Tensor> found_inf_value;
- optional<int64_t> found_inf_bdim;
- if (found_inf) {
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
- }
- batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
- return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
- }
- optional<Tensor> grad_scale_value;
- optional<int64_t> grad_scale_bdim;
- if (grad_scale) {
- std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
- }
- optional<Tensor> found_inf_value;
- optional<int64_t> found_inf_bdim;
- if (found_inf) {
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
- }
- auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
- return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
- return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
- }
- optional<Tensor> grad_scale_value;
- optional<int64_t> grad_scale_bdim;
- if (grad_scale) {
- std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
- }
- optional<Tensor> found_inf_value;
- optional<int64_t> found_inf_bdim;
- if (found_inf) {
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
- }
- batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
- }
- template <typename batch_rule_t, batch_rule_t batch_rule>
- ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
- c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
- auto maybe_layer = maybeCurrentDynamicLayer();
- vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
- int64_t cur_level = maybe_layer->layerId();
- if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
- return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
- }
- optional<Tensor> grad_scale_value;
- optional<int64_t> grad_scale_bdim;
- if (grad_scale) {
- std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
- }
- optional<Tensor> found_inf_value;
- optional<int64_t> found_inf_bdim;
- if (found_inf) {
- std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
- }
- auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
- return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
- }
- }} // namespace at::functorch
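
For context on the hunk above: every deleted `*_generated_plumbing` function follows the same codegen pattern — exclude the `FuncTorchBatched` dispatch key, read the current vmap level from the dynamic layer stack, fall through to the plain `at::_ops::...::call` when no argument is batched at that level, and otherwise unwrap each (optional) tensor into a (value, batch-dim) pair before invoking the batch rule. The following is a minimal standalone sketch of that control flow only; `BatchedValue`, `unwrapAtLevel`, `plain_add`, and `add_batch_rule` are toy stand-ins invented for illustration, not the functorch API.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <tuple>

// Toy stand-in for a tensor that may carry a batch dim at some vmap level.
struct BatchedValue {
  double data;
  std::optional<int64_t> level;  // vmap level this value is batched at, if any
};

// Mirrors isBatchedAtLevel: is this value batched at the current level?
bool isBatchedAtLevel(const BatchedValue& v, int64_t cur_level) {
  return v.level.has_value() && *v.level == cur_level;
}

// Mirrors unwrapTensorAtLevel: split a value into (payload, bdim) for the rule.
std::tuple<double, std::optional<int64_t>> unwrapAtLevel(
    const BatchedValue& v, int64_t cur_level) {
  if (isBatchedAtLevel(v, cur_level)) {
    return std::make_tuple(v.data, std::optional<int64_t>{0});
  }
  return std::make_tuple(v.data, std::optional<int64_t>{});
}

// The plain (unbatched) op: analogous to at::_ops::<op>::call.
double plain_add(double a, double b) { return a + b; }

// The batch rule: receives unwrapped payloads plus their batch dims.
double add_batch_rule(double a, std::optional<int64_t> /*a_bdim*/,
                      double b, std::optional<int64_t> /*b_bdim*/) {
  return a + b;  // a real rule would align/move batch dims before computing
}

// The plumbing itself: same shape as the generated *_plumbing functions.
double add_plumbing(const BatchedValue& a, const BatchedValue& b,
                    int64_t cur_level) {
  // Fast path: nothing is batched at this level, defer to the plain op.
  if (!isBatchedAtLevel(a, cur_level) && !isBatchedAtLevel(b, cur_level)) {
    return plain_add(a.data, b.data);
  }
  // Slow path: unwrap each argument and hand (value, bdim) to the batch rule.
  auto [a_value, a_bdim] = unwrapAtLevel(a, cur_level);
  auto [b_value, b_bdim] = unwrapAtLevel(b, cur_level);
  return add_batch_rule(a_value, a_bdim, b_value, b_bdim);
}

int main() {
  BatchedValue x{1.5, 1};             // batched at level 1
  BatchedValue y{2.5, std::nullopt};  // not batched
  std::cout << add_plumbing(x, y, /*cur_level=*/1) << "\n";  // prints 4
}
```

The variants in the hunk differ only in how much unwrapping is needed: `TensorList` and `Scalar` arguments pass through unchanged, a lone `Tensor` becomes a (value, bdim) pair as above, `c10::optional<Tensor>` arguments are unwrapped only when present, and the value-returning overloads additionally rewrap the batch rule's outputs via `makeBatchedVector` before returning.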