# gen.py

import argparse
import functools
import json
import os
import pathlib
from collections import defaultdict, namedtuple, OrderedDict
from dataclasses import dataclass
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Sequence,
    Set,
    Tuple,
    TypeVar,
    Union,
)

import yaml
from typing_extensions import Literal  # Python 3.8+

import torchgen.api.dispatcher as dispatcher
import torchgen.api.meta as meta
import torchgen.api.native as native
import torchgen.api.structured as structured
import torchgen.dest as dest
from torchgen.api import cpp
from torchgen.api.translate import translate
from torchgen.api.types import (
    Binding,
    CppSignature,
    CppSignatureGroup,
    DispatcherSignature,
    NamedCType,
    NativeSignature,
    SpecialArgName,
)
from torchgen.context import (
    method_with_native_function,
    native_function_manager,
    with_native_function,
    with_native_function_and_indices,
)
from torchgen.gen_functionalization_type import (
    gen_functionalization_definition,
    gen_functionalization_registration,
    gen_functionalization_view_inverse_declaration,
    GenCompositeViewCopyKernel,
)
from torchgen.gen_vmap_plumbing import gen_all_vmap_plumbing
from torchgen.model import (
    Argument,
    BackendIndex,
    BackendMetadata,
    BaseOperatorName,
    DEFAULT_KERNEL_NAMESPACE,
    DispatchKey,
    FRAGMENT_NAMESPACES,
    FunctionSchema,
    is_cuda_dispatch_key,
    is_generic_dispatch_key,
    is_ufunc_dispatch_key,
    Location,
    NativeFunction,
    NativeFunctionsGroup,
    NativeFunctionsViewGroup,
    OperatorName,
    OptionalType,
    SchemaKind,
    SelfArgument,
    STRUCTURED_DISPATCH_KEYS,
    TensorOptionsArguments,
    Type,
    Variant,
    ViewSchemaKind,
)
from torchgen.native_function_generation import (
    add_generated_native_functions,
    gen_composite_functional_kernel,
    gen_composite_out_kernel,
    pre_group_native_functions,
)
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import (
    assert_never,
    concatMap,
    context,
    FileManager,
    make_file_manager,
    mapMaybe,
    NamespaceHelper,
    Target,
    YamlDumper,
    YamlLoader,
)

T = TypeVar("T")

# Welcome to the ATen code generator v2!  The ATen code generator is
# responsible for parsing native_functions.yaml and then generating
# various generated files (e.g., TypeDefault.cpp) based on the operators
# defined in this file.  This means that the code generator knows how to
# parse function schema, and then translate this into various C++ types
# and boilerplate code.
#
# Some things to know about this file when you modify it:
#
# - This file has STRICT mypy typechecking.  Typecheck it with
#   `mypy --config mypy-strict.ini` in the root source directory
#
# - Most of the heavy lifting lives in external modules:
#   - 'model' has the data model for native_functions.yaml.  The classes
#     in that file represent what you see when you look at
#     a native_functions.yaml
#   - 'api' has conversions for how to translate JIT schema into
#     the various C++ APIs that the codegen interacts with.  There
#     are in fact THREE different C++ APIs: the public C++ API,
#     the dispatcher API, and the legacy dispatcher API.  See each
#     of these respective files for more information

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
#                         HELPER FUNCTIONS
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #


# A custom loader for YAML to let us also keep track of line numbers
# of each entry in the YAML file
class LineLoader(YamlLoader):
    def construct_mapping(self, node, deep=False):  # type: ignore[no-untyped-def]
        mapping = super().construct_mapping(node, deep=deep)  # type: ignore[no-untyped-call]
        # Add 1 so line numbering starts at 1
        mapping["__line__"] = node.start_mark.line + 1
        return mapping

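# Illustrative sketch (not part of the generator): loading YAML through
# LineLoader attaches a "__line__" key to every mapping, e.g.
#
#   entries = yaml.load("- func: foo(Tensor self) -> Tensor", Loader=LineLoader)
#   # entries == [{"func": "foo(Tensor self) -> Tensor", "__line__": 1}]
#
# These line numbers feed the Location objects used in error messages below.
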
_GLOBAL_PARSE_NATIVE_YAML_CACHE = {}
_GLOBAL_PARSE_TAGS_YAML_CACHE = {}

# Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
ParsedYaml = namedtuple("ParsedYaml", ["native_functions", "backend_indices"])


def parse_native_yaml_struct(
    es: object,
    valid_tags: Set[str],
    ignore_keys: Optional[Set[DispatchKey]] = None,
    path: str = "<stdin>",
    skip_native_fns_gen: bool = False,
) -> ParsedYaml:
    assert isinstance(es, list)
    rs: List[NativeFunction] = []
    bs: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] = defaultdict(dict)
    for e in es:
        assert isinstance(e.get("__line__"), int), e
        loc = Location(path, e["__line__"])
        funcs = e.get("func")
        with context(lambda: f"in {loc}:\n {funcs}"):
            func, m = NativeFunction.from_yaml(e, loc, valid_tags, ignore_keys)
            rs.append(func)
            BackendIndex.grow_index(bs, m)
    error_check_native_functions(rs)
    # Default dict is to prevent the codegen from barfing when we have a dispatch key that has no kernels yet.
    indices: Dict[DispatchKey, BackendIndex] = defaultdict(
        lambda: BackendIndex(
            dispatch_key=DispatchKey.Undefined,
            use_out_as_primary=True,
            external=False,
            device_guard=False,
            # I'm actually not sure about this; undefined could be hit on
            # empty TensorList, hypothetically that could have sizes in it
            index={},
        )
    )
    if not skip_native_fns_gen:
        add_generated_native_functions(rs, bs)
    for k, v in bs.items():
        # All structured in-tree operators are implemented in terms of their out operator.
        indices[k] = BackendIndex(
            dispatch_key=k,
            use_out_as_primary=True,
            external=False,
            # Only cuda-like devices in tree require device guards
            device_guard=is_cuda_dispatch_key(k),
            index=v,
        )
    return ParsedYaml(rs, indices)


def parse_tags_yaml_struct(es: object, path: str = "<stdin>") -> Set[str]:
    assert isinstance(es, list)
    rs: Set[str] = set()
    for e in es:
        assert isinstance(e.get("__line__"), int), e
        loc = Location(path, e["__line__"])
        tags = e.get("tag")
        with context(lambda: f"in {loc}:\n {tags}"):
            e_i = e.copy()
            name = e_i.pop("tag")
            desc = e_i.pop("desc", "")
            # ensure that each tag has a non-empty description
            assert desc != ""
            rs.add(name)
    return rs

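# Illustrative shape of a tags.yaml entry this expects (the concrete tag name
# here is only an example; the field names come from the pop() calls above):
#
#   - tag: inplace_view
#     desc: |
#       This tag indicates if an operator *only* modifies the tensor metadata
#
# Every entry must carry a non-empty `desc`, or the assert above fires.
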
@functools.lru_cache(maxsize=None)
def parse_tags_yaml(path: str) -> Set[str]:
    global _GLOBAL_PARSE_TAGS_YAML_CACHE
    if path not in _GLOBAL_PARSE_TAGS_YAML_CACHE:
        with open(path, "r") as f:
            es = yaml.load(f, Loader=LineLoader)
            _GLOBAL_PARSE_TAGS_YAML_CACHE[path] = parse_tags_yaml_struct(es, path=path)
    return _GLOBAL_PARSE_TAGS_YAML_CACHE[path]


def parse_native_yaml(
    path: str,
    tags_yaml_path: str,
    ignore_keys: Optional[Set[DispatchKey]] = None,
    *,
    skip_native_fns_gen: bool = False,
) -> ParsedYaml:
    global _GLOBAL_PARSE_NATIVE_YAML_CACHE
    if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
        valid_tags = parse_tags_yaml(tags_yaml_path)
        with open(path, "r") as f:
            es = yaml.load(f, Loader=LineLoader)
            _GLOBAL_PARSE_NATIVE_YAML_CACHE[path] = parse_native_yaml_struct(
                es,
                valid_tags,
                ignore_keys,
                path=path,
                skip_native_fns_gen=skip_native_fns_gen,
            )
    return _GLOBAL_PARSE_NATIVE_YAML_CACHE[path]


# Some assertions are already performed during parsing, but those are only within a single NativeFunction.
# Assertions here are meant to be performed across NativeFunctions.
def error_check_native_functions(funcs: Sequence[NativeFunction]) -> None:
    func_map: Dict[OperatorName, NativeFunction] = {}
    base_func_map: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
    for f in funcs:
        func_map[f.func.name] = f
        base_func_map[f.func.name.name].append(f)
    for f in funcs:
        if f.structured_delegate is not None:
            delegate_func = func_map[f.structured_delegate]
            assert delegate_func.structured, (
                f"{f.func.name} is marked as a structured_delegate pointing to "
                f"{f.structured_delegate}, but {f.structured_delegate} is not marked as structured. "
                f"Consider adding 'structured=True' to the delegated operator"
            )
        # See Note [resize_ in Functionalization]
        # resize_() is technically an inplace view op (and therefore needs the tag),
        # but it would be overkill to add a true "view" variant of resize.
        # Instead, resize_() gets special treatment in functionalization,
        # and we have a resize() op that is non-aliasing + functional.
        if "inplace_view" in f.tags and str(f.func.name) != "resize_":
            base_name = f.func.name.name
            overload_name = f.func.name.overload_name
            assert base_name.inplace, (
                f"{f.func.name} is marked with tag: inplace_view, but it doesn't follow the naming "
                "convention for inplace ops - the codegen expects the base name to have a trailing underscore. "
            )
            out_of_place_base_name = BaseOperatorName(
                base_name.base, False, base_name.dunder_method
            )
            assert len(base_func_map[out_of_place_base_name]) > 0, (
                f"{f.func.name} is marked with tag: inplace_view. The codegen expects there to be a corresponding "
                f"out-of-place view op with the name '{base_name}' and matching schema, but it didn't find one. "
            )

def cpp_string(s: str) -> str:
    """Convert a python string into a c++ string literal"""
    s = s.replace("\\", "\\\\")
    s = s.replace('"', '\\"')
    s = s.replace("\a", "\\a")
    s = s.replace("\b", "\\b")
    s = s.replace("\f", "\\f")
    s = s.replace("\n", "\\n")
    s = s.replace("\v", "\\v")
    s = s.replace("\t", "\\t")
    return f'"{s}"'

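# Illustrative behaviour of cpp_string:
#
#   cpp_string('add.Tensor(Tensor self, Tensor other) -> Tensor')
#   # -> '"add.Tensor(Tensor self, Tensor other) -> Tensor"'
#   cpp_string('say "hi"\n')
#   # -> '"say \\"hi\\"\\n"'  (quotes and control characters are escaped)
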
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
#                       C++ CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #

# Most functions in this section are curried: they consist of a function
# that takes some parameters (e.g., what is to be generated) which itself
# returns a function that actually maps NativeFunction to the code
# to be generated.  This pattern makes it convenient to use map, concatMap
# and similar functional combinators.


def static_dispatch_keys(backends: List[BackendIndex]) -> List[DispatchKey]:
    if len(backends) == 0:
        return []
    else:
        return [backend.dispatch_key for backend in backends] + [
            DispatchKey.CompositeImplicitAutograd,
            DispatchKey.CompositeImplicitAutogradNestedTensor,
            DispatchKey.CompositeExplicitAutograd,
            DispatchKey.CompositeExplicitAutogradNonFunctional,
        ]


def get_static_dispatch_backend(
    f: NativeFunction, backend_index: BackendIndex
) -> Optional[DispatchKey]:
    if f.structured_delegate is not None or backend_index.has_kernel(f):
        # TODO: for ops with structured_delegate it should check the dispatch table of
        # the out variant instead. For now, these structured ops all have CPU/CUDA kernels
        # so we always dispatch to the `backend`, but this could be wrong when we
        # migrate math/default_backend ops to use structured delegate.
        return backend_index.dispatch_key
    elif f.has_composite_explicit_autograd_kernel:
        return DispatchKey.CompositeExplicitAutograd
    elif f.has_composite_explicit_autograd_non_functional_kernel:
        return DispatchKey.CompositeExplicitAutogradNonFunctional
    elif f.has_composite_implicit_autograd_kernel:
        return DispatchKey.CompositeImplicitAutograd
    elif f.has_composite_implicit_autograd_nested_tensor_kernel:
        return DispatchKey.CompositeImplicitAutogradNestedTensor
    return None


def static_dispatch_ops_header(
    f: NativeFunction, backend_index: List[BackendIndex]
) -> Optional[str]:
    if backend_index is None or f.manual_kernel_registration:
        return None

    output = []
    for index in backend_index:
        dispatch_key = get_static_dispatch_backend(f, index)
        if dispatch_key is not None:
            output.append(
                f"#include <ATen/ops/{f.root_name}_{dispatch_key.lower()}_dispatch.h>"
            )
    return "\n".join(output)


def static_dispatch_extra_headers(backends: List[BackendIndex]) -> List[str]:
    return [
        f"#include <ATen/{dispatch_key}Functions.h>"
        for dispatch_key in static_dispatch_keys(backends)
    ]


# Translates arguments of `sig` to CppSignature bindings.
# Note that we have a special case for `memory_format` argument and this case is not covered by
# tools.codegen.api.translate() yet as its application is limited to static dispatch.
def translate_args(
    sig: Union[CppSignature, DispatcherSignature],
    cpp_sig: CppSignature,
) -> str:
    # Adds SpecialArgName.possibly_redundant_memory_format NamedCType for memory_format bindings
    def add_spl_memory_format_binding(input_bindings: List[Binding]) -> List[Binding]:
        output_bindings: List[Binding] = []
        for binding in input_bindings:
            if binding.name == "memory_format":
                spl_mem_format_binding = Binding(
                    nctype=NamedCType(
                        SpecialArgName.possibly_redundant_memory_format,
                        binding.nctype.type,
                    ),
                    name=binding.name,
                    default=binding.default,
                    argument=binding.argument,
                )
                output_bindings.append(spl_mem_format_binding)
            else:
                output_bindings.append(binding)
        return output_bindings

    src_bindings = list(sig.arguments())
    goal_bindings = list(cpp_sig.arguments())
    # When last argument of CPP signature has SpecialArgName.possibly_redundant_memory_format NCType,
    # get memory_format bindings of dispatcher signature to have the same NCType as well
    for arg in goal_bindings:
        if arg.nctype.name == SpecialArgName.possibly_redundant_memory_format:
            src_bindings = add_spl_memory_format_binding(src_bindings)
            break
    exprs = translate(src_bindings, goal_bindings)
    return ", ".join(a.expr for a in exprs)

def generate_static_dispatch_backend_call(
    sig: Union[CppSignature, DispatcherSignature],
    f: NativeFunction,
    backend_index: BackendIndex,
) -> str:
    cpp_sigs = CppSignatureGroup.from_native_function(
        f, method=False, fallback_binding=False
    )
    if sig.symint and f.func.has_symint():
        cpp_sig = cpp_sigs.symint_signature
    else:
        cpp_sig = cpp_sigs.signature
    assert cpp_sig is not None
    name = cpp_sig.name()
    exprs = translate_args(sig, cpp_sig)
    backend_metadata = backend_index.get_kernel(f)
    kernel_ns = (
        backend_metadata.cpp_namespace
        if backend_metadata and backend_metadata.cpp_namespace
        else DEFAULT_KERNEL_NAMESPACE
    )
    ns = kernel_ns.replace("::native", "")
    return f"return {ns}::{backend_index.dispatch_key.lower()}::{name}({exprs});"


def generate_static_dispatch_fallback_call(
    sig: Union[CppSignature, DispatcherSignature],
    f: NativeFunction,
    backend_indices: List[BackendIndex],
) -> str:
    cpp_sigs = CppSignatureGroup.from_native_function(
        f, method=False, fallback_binding=False
    )
    if sig.symint and f.func.has_symint():
        cpp_sig = cpp_sigs.symint_signature
    else:
        cpp_sig = cpp_sigs.signature
    assert cpp_sig is not None
    name = cpp_sig.name()
    exprs = translate_args(sig, cpp_sig)
    ns = DEFAULT_KERNEL_NAMESPACE.replace("::native", "")
    if f.has_composite_explicit_autograd_kernel:
        return f"return {ns}::{DispatchKey.CompositeExplicitAutograd.lower()}::{name}({exprs});"
    elif f.has_composite_explicit_autograd_non_functional_kernel:
        return f"return {ns}::{DispatchKey.CompositeExplicitAutogradNonFunctional.lower()}::{name}({exprs});"
    elif f.has_composite_implicit_autograd_kernel:
        return f"return {ns}::{DispatchKey.CompositeImplicitAutograd.lower()}::{name}({exprs});"
    elif f.has_composite_implicit_autograd_nested_tensor_kernel:
        return f"return {ns}::{DispatchKey.CompositeImplicitAutogradNestedTensor.lower()}::{name}({exprs});"
    else:
        return f"""TORCH_CHECK(false, "Static dispatch does not support {name} for\
{', '.join([str(index.dispatch_key) for index in backend_indices])} ");"""

def static_dispatch(
    sig: Union[CppSignature, DispatcherSignature],
    f: NativeFunction,
    backend_indices: List[BackendIndex],
) -> str:
    """
    For a given `NativeFunction`, find out the corresponding backend and dispatch to it. If more than one
    backend exists, fall back to determining the dispatch key from the inputs.
    Arguments:
        sig: A CppSignature or DispatcherSignature for this native function we want to use.
        f: NativeFunction to generate static dispatch.
        backend_indices: All available backends.
    Return:
        C++ code to call backend-specific functions, e.g., "return at::cpu::add(self, other, scale);"
    """
    if len(backend_indices) == 0 or f.manual_kernel_registration:
        return ""

    keys = [
        b
        for b in backend_indices
        if b.has_kernel(f)
        or (
            f.structured_delegate is not None
            and b.dispatch_key in STRUCTURED_DISPATCH_KEYS
        )
    ]
    if len(keys) == 1:
        return generate_static_dispatch_backend_call(sig, f, keys[0])
    elif len(keys) == 0:
        return generate_static_dispatch_fallback_call(sig, f, backend_indices)

    native_tensor_args = [
        a.name
        for a in sig.arguments()
        if isinstance(a.argument, SelfArgument)
        or isinstance(a.argument, Argument)
        and a.argument.type.is_tensor_like()
    ]
    tensor_args = ", ".join(native_tensor_args)
    tensor_opts = f.func.arguments.tensor_options

    stmts = []
    subexprs: List[str] = []
    if tensor_opts is not None:
        subexprs.append(
            "DispatchKeySet(c10::computeDispatchKey(dtype, layout, device))"
        )
    if tensor_args != "":
        subexprs.append(f"c10::detail::multi_dispatch_key_set({tensor_args})")
    stmts.append(f"""DispatchKeySet _dk_set = {' | '.join(subexprs)};""")
    stmts.append("DispatchKey _dk = c10::highestPriorityBackendTypeId(_dk_set);")

    dispatch_code = []
    for index in keys:
        dispatch_code.append(f"""case DispatchKey::{index.dispatch_key}:""")
        dispatch_code.append(
            f"""\t{generate_static_dispatch_backend_call(sig, f, index)};"""
        )

    fallback = generate_static_dispatch_fallback_call(sig, f, backend_indices)
    connector = "\n\t\t"

    return f"""
    {connector.join(stmts)}
    switch (_dk) {{
        {connector.join(dispatch_code)}
        default:
            {fallback}
    }}
    """

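# Roughly, for an op with more than one static dispatch backend (say CPU and
# CUDA) the emitted C++ is a runtime key computation plus a switch; the sketch
# below is illustrative only (exact whitespace and call expressions depend on
# the signature):
#
#   DispatchKeySet _dk_set = c10::detail::multi_dispatch_key_set(self, other);
#   DispatchKey _dk = c10::highestPriorityBackendTypeId(_dk_set);
#   switch (_dk) {
#     case DispatchKey::CPU:
#       return at::cpu::add(self, other);
#     case DispatchKey::CUDA:
#       return at::cuda::add(self, other);
#     default:
#       TORCH_CHECK(false, "Static dispatch does not support add for ...");
#   }
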
# Generates RegisterSchema.cpp.  Depending on the selector, either
# all schemas are registered, or only some are (in the case of
# selective build)
@dataclass(frozen=True)
class RegisterSchema:
    selector: SelectiveBuilder

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        if not self.selector.is_native_function_selected(f):
            return None
        tags = "{" + ", ".join([f"at::Tag::{tag}" for tag in f.tags]) + "}"
        return f"m.def({cpp_string(str(f.func))}, {tags});\n"

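# For each selected operator this emits one schema registration; illustratively
# (the op and tag here are examples, not output of a specific run):
#
#   m.def("add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", {at::Tag::pointwise});
#
# Ops filtered out by the SelectiveBuilder produce no registration at all.
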
# Generates Operators.h and Operators.cpp.
# These provide macros that, given an operator and overload name, allow users
# to access an "un-overloaded" function version of the operator. This
# is useful for extension writers who (1) want to decltype the operator
# and (2) don't want to worry about method-only operators.
@dataclass(frozen=True)
class ComputeOperators:
    target: Literal[Target.DECLARATION, Target.DEFINITION]
    static_dispatch_backend_indices: List[BackendIndex]

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> str:
        sig = DispatcherSignature.from_schema(f.func)
        name = f.func.name.unambiguous_name()

        if self.target is Target.DECLARATION:
            # Note [The ATen Operators API]
            # The ATen Operators API lives in the at::_ops namespace, and contains compile-time
            # metadata about each operator + entry points into the Dispatcher.
            # The C++ function, method, and redispatch API's are all implemented as wrappers
            # into various bits of the structs defined here.
            #
            # Important characteristics about the Operators API:
            # (1) It follows the Dispatcher API.
            #     This is kind of necessary to avoid overhead.
            #     For example: if it followed the C++ API, then all of the faithful C++ factory functions
            #     would need to wrap their arguments into TensorOptions only to unwrap them again.
            # (2) Overload names are disambiguated.
            #     This is helpful for pytorch extenders who would like to decltype() an aten operator,
            #     that has overloads, e.g. decltype(at::_ops::mul_Tensor::call)
            # (3) No argument defaulting is allowed.
            #     This is more of an implementation detail to avoid #include cycles,
            #     since TensorBody.h (which defines the Tensor class) needs to include this file.
            # (4) manual_cpp_bindings and faithful names are not included in the API.
            #     This applies to stuff like __dispatch__is_complex(), and add_outf().
            #     These aren't "real aten ops", they're just additional functions provided by the C++ API.
            #     They're implemented as wrappers in Functions.h that call into the actual operators
            #     defined here, i.e. at::_ops::is_complex::call() and at::_ops::add_out::call().
            #     This means that ATEN_OP(is_complex) will not fastpath, and will go through the dispatcher.
            return f"""
struct TORCH_API {name} {{
  using schema = {sig.type()};
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::{f.func.name.name}")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "{f.func.name.overload_name}")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, {cpp_string(str(f.func))})
  static {sig.defn(name="call", is_redispatching_fn=False)};
  static {sig.defn(name="redispatch", is_redispatching_fn=True)};
}};"""

        elif self.target is Target.DEFINITION:
            defns = f"""
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, name, "aten::{f.func.name.name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, overload_name, "{f.func.name.overload_name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, schema_str, {cpp_string(str(f.func))})

// aten::{f.func}
static C10_NOINLINE c10::TypedOperatorHandle<{name}::schema> create_{name}_typed_handle() {{
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow({name}::name, {name}::overload_name)
      .typed<{name}::schema>();
}}
"""

            for is_redispatching_fn in [False, True]:
                if is_redispatching_fn:
                    dispatcher_exprs_str = ", ".join(
                        ["dispatchKeySet"] + [a.name for a in sig.arguments()]
                    )
                    method_base = "redispatch"
                else:
                    dispatcher_exprs_str = ", ".join([a.name for a in sig.arguments()])
                    method_base = "call"

                dispatcher_call = method_base
                method_name = f"{name}::{method_base}"

                fn_body = f"""
    static auto op = create_{name}_typed_handle();
    return op.{dispatcher_call}({dispatcher_exprs_str});"""

                if (
                    not is_redispatching_fn
                    and len(self.static_dispatch_backend_indices) > 0
                ):
                    # call() should go through static dispatch
                    fn_body = static_dispatch(
                        sig, f, backend_indices=self.static_dispatch_backend_indices
                    )
                defns += f"""
// aten::{f.func}
{sig.defn(name=method_name, is_redispatching_fn=is_redispatching_fn)} {{
    {fn_body}
}}
"""
            return defns

        else:
            assert_never(self.target)

# Generates Functions.h, which provides the functional public C++ API,
# and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeFunction:
    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        sig_group = CppSignatureGroup.from_native_function(
            f, method=False, fallback_binding=f.manual_cpp_binding
        )
        has_symint = f.func.has_symint()

        result = ""
        for sig in sig_group.signatures():
            # See Note [The ATen Operators API]
            target_sig = DispatcherSignature.from_schema(f.func)
            exprs = translate(sig.arguments(), target_sig.arguments())
            exprs_str = ", ".join([e.expr for e in exprs])

            if sig.symint:
                intlike_t = "c10::SymInt"
            else:
                intlike_t = "int64_t"

            if Variant.function in f.variants:
                result += f"""
// aten::{f.func}
inline {sig.decl()} {{
    return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}"""

            # The template function can be used from template situations
            # where you want to switch between the symint or not version
            # depending on a template argument
            #
            # NB: we ALWAYS generate this even for methods. But we put it in
            # this header so it can take advantage of per-op headers
            if has_symint:
                result += f"""
namespace symint {{
  template <typename T, typename = std::enable_if_t<std::is_same<T, {intlike_t}>::value>>
  {sig.decl(suppress_symint_suffix=True)} {{
    return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
  }}
}}
"""
        return result

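# The generated Functions.h entry for a function variant is a thin inline
# wrapper over the at::_ops entry point; roughly (illustrative example op):
#
#   // aten::relu(Tensor self) -> Tensor
#   inline at::Tensor relu(const at::Tensor & self) {
#       return at::_ops::relu::call(self);
#   }
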
# Generates TensorBody.h. This file provides the object-oriented (method-based)
# public C++ API, and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeTensorMethod:
    target: Literal[Target.DECLARATION, Target.DEFINITION]
    static_dispatch_backend_indices: List[BackendIndex]

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        if Variant.method not in f.variants:
            return None
        assert not f.func.is_out_fn()
        assert f.func.arguments.self_arg is not None

        sig_group = CppSignatureGroup.from_native_function(
            f, method=True, fallback_binding=f.manual_cpp_binding
        )

        if self.target is Target.DECLARATION:
            result = ""
            for sig in sig_group.signatures():
                result += f"{sig.decl()} const;\n"
            return result

        if self.target is not Target.DEFINITION:
            assert_never(self.target)

        result = ""
        for sig in sig_group.signatures():
            target_sig = DispatcherSignature.from_schema(f.func)
            exprs = translate(sig.arguments(), target_sig.arguments(), method=True)
            exprs_str = ", ".join([e.expr for e in exprs])

            result += f"""
// aten::{f.func}
inline {sig.defn(prefix="Tensor::")} const {{
    return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
        return result


# Generates RedispatchFunctions.h.
# This is similar to the C++ API defined in Functions.h, but provides access
# to the dispatcher's redispatch API.
@dataclass(frozen=True)
class ComputeRedispatchFunction:
    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # We unconditionally generate function variants of the redispatch API.
        # This is mainly because we can namespace functions separately, but not methods.
        sig_group = CppSignatureGroup.from_native_function(
            f, method=False, fallback_binding=f.manual_cpp_binding
        )
        result = ""
        for sig in sig_group.signatures():
            target_sig = DispatcherSignature.from_schema(f.func)
            exprs = translate(sig.arguments(), target_sig.arguments())
            exprs_str = ", ".join(["dispatchKeySet"] + [a.expr for a in exprs])

            result += f"""
// aten::{f.func}
inline {sig.decl(is_redispatching_fn=True)} {{
    return at::_ops::{f.func.name.unambiguous_name()}::redispatch({exprs_str});
}}
"""
        return result


# Generates ATenOpList.cpp, a runtime accessible list of all aten
# operators.
# TODO: This was historically used to help some JIT interop code
# figure out whether or not to treat aten namespace'd operators
# one way or another; we should reevaluate if this is actually needed.
@with_native_function
def compute_aten_op(f: NativeFunction) -> str:
    return f'{{"aten::{f.func.name.name}", "{f.func.name.overload_name}"}},'

# Generates MetaFunctions.h
def compute_meta_function_declaration(g: NativeFunctionsGroup) -> Optional[str]:
    if not g.structured:
        return None
    with native_function_manager(g.out):
        name = meta.name(g)
        args = structured.meta_arguments(g)
        args_str = ", ".join(a.decl() for a in args)
        parent_class = g.out.structured_inherits
        if parent_class is None:
            parent_class = "at::impl::MetaBase"
        meta_return = "void"
        precomputed = g.out.precomputed if g.structured else None

        if precomputed:
            # Generate the template declaration with one bool parameter for each
            # precomputed element. Each parameter is true if the corresponding (in
            # terms of position) precomputed element has been set.
            precomputed_values = [*precomputed.replace.values(), precomputed.add]
            precomputed_elements = [
                elem for replace_list in precomputed_values for elem in replace_list
            ]
            precomputed_template_parameters = [
                elem.name.upper() for elem in precomputed_elements
            ]
            precomputed_template_params_str = ", ".join(
                f"bool {param} = false" for param in precomputed_template_parameters
            )
            precompute_template_decl = f"template <{precomputed_template_params_str}>"

            # Generate a string containing declarations of all precomputed elements.
            precomputed_elements_with_cpp_types = [
                structured.argument_type(elem, binds=elem.name)
                for elem in precomputed_elements
            ]
            precomputed_elements_decl = ";\n".join(
                f"{elem.cpp_type(strip_ref=True)} {elem.name}"
                for elem in precomputed_elements_with_cpp_types
            )

            # Generate "setter" methods for each precomputed element. Each method will return
            # a new instance of precompute_out with the template parameter that corresponds to
            # the member set by the method to true (to indicate that it has been set).
            setter_methods = []
            for i, elem in enumerate(precomputed_elements):
                # Generate the signature. The return type will be the same
                # as the type of `this` but with the template parameter
                # corresponding to the element set by this method set to true.
                # The assert generated below will ensure that this template
                # parameter is false on the type of `this`.
                return_ty_templates = ", ".join(
                    precomputed_template_parameters[:i]
                    + ["true"]
                    + precomputed_template_parameters[i + 1 :]
                )
                return_ty = f"precompute_out<{return_ty_templates}>"
                elem_cpp_ty = precomputed_elements_with_cpp_types[i].cpp_type(
                    strip_ref=True
                )
                signature = f"{return_ty} set_{elem.name}({elem_cpp_ty} value)"

                # Generate an assert which checks that the
                # template parameter corresponding to the precomputed
                # element that is set by this method is false on the
                # class corresponding to the object that `this` points to.
                # This ensures that each element can be set only once.
                assert_msg = f'"{precomputed_elements[i].name} already set"'
                assert_stmt = f"static_assert({precomputed_template_parameters[i]} == false, {assert_msg});"

                # Generate the new object construction block. All state
                # except the element that this method sets is copied from the
                # object that `this` points to. The value for the element that
                # the method sets is taken from a method parameter.
                construction_stmts = []
                construction_stmts.append(f"{return_ty} ret;")

                for j, elem in enumerate(precomputed_elements):
                    if i == j:
                        construction_stmts.append(f"ret.{elem.name} = value;")
                    else:
                        construction_stmts.append(
                            f"ret.{elem.name} = this->{elem.name};"
                        )

                construction_stmts.append("return ret;")
                construction_block = "\n".join(construction_stmts)

                setter_methods.append(
                    f"""
{signature} {{
  {assert_stmt}
  {construction_block}
}}
"""
                )
            setter_methods_decl = "\n".join(setter_methods)

            # Meta should return an instance of the struct containing the precomputed elements.
            meta_return_template_params = ", ".join(
                ["true"] * len(precomputed_template_parameters)
            )
            # This typedef (actually a using statement) is needed so that TORCH_META_FUNC can reuse the return
            # type (which has a variable number of template parameters).
            meta_return_typedef = f"using meta_return_ty = precompute_out <{meta_return_template_params}>;"
            meta_return = "meta_return_ty"
            precomputed_decl = f"""
{precompute_template_decl}
struct TORCH_API precompute_out {{
  {setter_methods_decl}
  {precomputed_elements_decl};
}};"""
        else:
            meta_return_typedef = ""
            precomputed_decl = ""

        return f"""\
struct TORCH_API structured_{name} : public {parent_class} {{
  {precomputed_decl}
  {meta_return_typedef}
  {meta_return} meta({args_str});
}};
"""

def needs_backend_select(f: NativeFunction, selector: SelectiveBuilder) -> bool:
    name = str(f.func.name.name)
    if name.endswith("_like") or name.startswith("new_"):
        return False
    if f.func.arguments.tensor_options is None:
        return False
    return selector.is_native_function_selected(f)


# Generates RegisterBackendSelect.cpp, a series of kernels which provide
# specialized computation of dispatch key for operator signatures which cannot
# be easily done automatically using templating.
@dataclass(frozen=True)
class ComputeBackendSelect:
    target: Literal[Target.DEFINITION, Target.REGISTRATION]

    # Selector object to determine which operators to generate
    # registration code for.
    selector: SelectiveBuilder

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        if not needs_backend_select(f, self.selector):
            return None

        name = native.name(f.func)
        # BackendSelect can go to Meta, so it must preserve symints
        native_sig = NativeSignature(f.func, symint=True)

        native_tensor_args = [
            a
            for a in native_sig.arguments()
            if isinstance(a.argument, Argument) and a.argument.type.is_tensor_like()
        ]

        dispatcher_sig = DispatcherSignature.from_schema(f.func)

        sig: Union[NativeSignature, DispatcherSignature]
        sig = dispatcher_sig
        dispatcher_exprs = dispatcher_sig.exprs()
        dispatch_key = "c10::computeDispatchKey(dtype, layout, device)"

        if self.target is Target.DEFINITION:
            # I don't think there's actually a good reason to generate
            # these two cases differently.
            # The first case could probably be improved though - it calls computeDispatchKeySet(),
            # which looks at TLS dispatch keys - there should not be any by the time we reach backend select.
            if native_tensor_args:
                assert f.func.arguments.has_tensor_arg()
                tensor_args = ", ".join(a.name for a in native_tensor_args)
                compute_dk = f"""\
DispatchKeySet _dk_set = c10::DispatchKeySet({dispatch_key}) | c10::detail::multi_dispatch_key_set({tensor_args});
DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);"""
            else:
                assert not f.func.arguments.has_tensor_arg()
                compute_dk = (
                    f"DispatchKeySet _dk = c10::DispatchKeySet({dispatch_key});"
                )
            return f"""\
// aten::{f.func}
C10_ALWAYS_INLINE
{sig.defn(name)} {{
  {compute_dk}
  return at::_ops::{f.func.name.unambiguous_name()}::redispatch(
      _dk, {', '.join(a.expr for a in dispatcher_exprs)});
}}
"""
        elif self.target is Target.REGISTRATION:
            return f"""m.impl("aten::{f.func.name}", TORCH_FN({name}));"""
        else:
            assert_never(self.target)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
#                       YAML CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #


def format_yaml(data: object) -> str:
    # Ignore alias in Dumper
    YamlDumper.ignore_aliases = lambda self, data: True  # type: ignore[assignment]

    # Support serializing OrderedDict
    def dict_representer(dumper: Any, data: Any) -> Any:
        return dumper.represent_dict(data.items())

    YamlDumper.add_representer(OrderedDict, dict_representer)  # type: ignore[no-untyped-call]
    # Some yaml parsers (e.g. Haskell's) don't understand line breaks.
    # width=1e9 turns off optional line breaks and improves
    # the portability of the outputted yaml.
    return yaml.dump(data, default_flow_style=False, Dumper=YamlDumper, width=1e9)  # type: ignore[no-any-return, call-overload]

# For some reason, some defaults we write to YAML are written as native
# YAML objects, rather than doing them uniformly as strings. This
# function detects those cases and converts them into native Python
# objects.
def pythonify_default(s: str) -> object:
    if s == "true":
        return True
    elif s == "false":
        return False

    try:
        return int(s)
    except ValueError:
        try:
            return float(s)
        except ValueError:
            return s

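# Illustrative behaviour:
#
#   pythonify_default("true")    # -> True
#   pythonify_default("1")       # -> 1
#   pythonify_default("1e-5")    # -> 1e-05
#   pythonify_default("[0, 1]")  # -> "[0, 1]"  (not numeric, left as a string)
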
# What is a dynamic type?  Over time, the semantic meaning of
# dynamic type has degraded to meaninglessness (in the old days,
# it captured dtype-ness of types, but that has gone away with
# the removal of TH).  These days, it's mostly the same thing as
# the C++ API argument type, except that Tensor and Tensor?
# arguments simply present as Tensor.
#
# TODO: Get rid of dynamic_type, after getting tools/autograd
# to use the new codegen framework
def dynamic_type(t: Type) -> str:
    if isinstance(t, OptionalType):
        return dynamic_type(t.elem)
    # Note we don't use t.is_tensor_like() here because it would
    # also include Tensor[]
    if str(t) == "Tensor":
        return "at::Tensor"
    # This is a legacy concept, so never report SymInt
    return cpp.argumenttype_type(
        t, mutable=False, binds="__placeholder__", symint=False
    ).cpp_type()

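# Illustrative behaviour (assuming Type.parse from torchgen.model; the exact
# spelling of the non-Tensor cases comes from cpp.argumenttype_type):
#
#   dynamic_type(Type.parse("Tensor"))   # -> "at::Tensor"
#   dynamic_type(Type.parse("Tensor?"))  # -> "at::Tensor"  (optionals are unwrapped)
#   dynamic_type(Type.parse("int[2]"))   # -> e.g. "at::IntArrayRef"
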
def compute_method_of_yaml(variants: Set[Variant]) -> List[str]:
    # This is written out explicitly to ensure that Tensor and
    # namespace are put into the list in the right order
    method_of = ["Type"]
    if Variant.method in variants:
        method_of.append("Tensor")
    if Variant.function in variants:
        method_of.append("namespace")
    return method_of

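# E.g. an op with both variants ({Variant.function, Variant.method}) yields
# ["Type", "Tensor", "namespace"]; a function-only op yields ["Type", "namespace"].
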
def compute_returns_yaml(
    f: NativeFunction,
) -> Tuple[List[Dict[str, str]], Dict[str, str]]:
    # Note [name and field_name]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # To understand name_to_field_name, we must first talk about this
    # schema:
    #
    #   lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
    #
    # There is something very odd about this schema: it is an out
    # variant of the function (that is to say, it will convert into
    # at::lstsq_out() in the C++ API), but the names of the output
    # return arguments don't match the keyword argument names of
    # the inputs. It TURNS OUT that in this situation, the historical
    # Declarations.yaml we want to output is this (abbreviated to
    # only show relevant fields):
    #
    #   arguments:
    #     ...
    #     - field_name: solution
    #       name: X
    #     - field_name: QR
    #       name: qr
    #     ...
    #
    #   returns:
    #     - field_name: solution
    #       name: X
    #     - field_name: QR
    #       name: qr
    #
    # The name of the return fields is stored in 'field_name', and the
    # name of the arguments is stored in 'name'. So when we process
    # arguments, we need a way to get at the corresponding return. At
    # the moment, this is most conveniently done by constructing a
    # mapping from name (the argument concept) to field_name (the
    # return concept) while processing return arguments, since we don't
    # directly maintain this correspondence in the modeling of function
    # schema itself.
    #
    # See also https://github.com/pytorch/pytorch/issues/43114
    name_to_field_name: Dict[str, str] = {}

    # Compute the returns field of the YAML entry
    names = cpp.return_names(f)
    returns = []
    for i, (r, name) in enumerate(zip(f.func.returns, names)):
        ret = {
            "dynamic_type": dynamic_type(r.type),
            "name": name,
            # legacy, report ints
            "type": cpp.return_type(r, symint=False).cpp_type(),
        }
        if r.name:
            # See Note [name and field_name]
            ret["field_name"] = r.name
            if f.func.is_out_fn():
                name_to_field_name[f.func.arguments.out[i].name] = r.name
        returns.append(ret)

    return returns, name_to_field_name

# arguments in yaml roughly correspond to the public C++ API
def compute_cpp_argument_yaml(
    cpp_a: Binding,
    *,
    schema_order: bool,
    kwarg_only_set: Set[str],
    out_arg_set: Set[str],
    name_to_field_name: Dict[str, str],
) -> object:
    if isinstance(cpp_a.argument, TensorOptionsArguments):
        arg: Dict[str, object] = {
            "annotation": None,
            "dynamic_type": "at::TensorOptions",
            "is_nullable": False,
            "name": cpp_a.name,
            "type": cpp_a.type,
            "kwarg_only": True,
        }
        if cpp_a.default is not None:
            arg["default"] = cpp_a.default
        return arg
    elif isinstance(cpp_a.argument, SelfArgument):
        raise AssertionError()
    elif isinstance(cpp_a.argument, Argument):
        return compute_argument_yaml(
            cpp_a.argument,
            schema_order=schema_order,
            kwarg_only_set=kwarg_only_set,
            out_arg_set=out_arg_set,
            name_to_field_name=name_to_field_name,
        )


def compute_argument_yaml(
    a: Argument,
    *,
    schema_order: bool,
    kwarg_only_set: Set[str],
    out_arg_set: Set[str],
    name_to_field_name: Dict[str, str],
) -> object:
    arg: Dict[str, object] = {
        "annotation": str(a.annotation) if a.annotation else None,
        "dynamic_type": dynamic_type(a.type),
        "is_nullable": a.type.is_nullable(),
        "name": a.name,
        # legacy, report ints
        "type": cpp.argument_type(a, binds="__placeholder__", symint=False).cpp_type(),
    }
    if a.default is not None:
        arg["default"] = pythonify_default(
            cpp.default_expr(a.default, a.type, symint=False)
        )
    if a.name in kwarg_only_set:
        arg["kwarg_only"] = True
    if a.name in out_arg_set:
        arg["output"] = True
        arg["allocate"] = True
        # See Note [name and field_name]
        if a.name in name_to_field_name:
            arg["field_name"] = name_to_field_name[a.name]
    # Historically, booleans don't get their size recorded, because it
    # is already built into the cpp type (e.g., std::array<bool, 4>)
    l = a.type.is_list_like()
    if l is not None and l.size is not None and str(l.elem) != "bool":
        arg["size"] = l.size
    return arg

  1046. @with_native_function
  1047. def compute_declaration_yaml(f: NativeFunction) -> object:
  1048. returns, name_to_field_name = compute_returns_yaml(f)
  1049. # These sets are used to conveniently test if an argument is a
  1050. # kwarg-only or out argument
  1051. kwarg_only_set = {a.name for a in f.func.arguments.flat_kwarg_only}
  1052. out_arg_set = {a.name for a in f.func.arguments.out}
  1053. sig_group = CppSignatureGroup.from_native_function(
  1054. f, method=False, fallback_binding=False
  1055. )
  1056. cpp_args = sig_group.signature.arguments()
  1057. arguments = [
  1058. compute_cpp_argument_yaml(
  1059. cpp_a,
  1060. schema_order=False,
  1061. kwarg_only_set=kwarg_only_set,
  1062. out_arg_set=out_arg_set,
  1063. name_to_field_name=name_to_field_name,
  1064. )
  1065. for cpp_a in cpp_args
  1066. ]
  1067. schema_order_jit_arguments = list(f.func.schema_order_arguments())
  1068. schema_order_arguments = [
  1069. compute_argument_yaml(
  1070. a,
  1071. schema_order=True,
  1072. kwarg_only_set=kwarg_only_set,
  1073. out_arg_set=out_arg_set,
  1074. name_to_field_name=name_to_field_name,
  1075. )
  1076. for a in schema_order_jit_arguments
  1077. ]
  1078. cpp_schema_order_types = [
  1079. # NB: method here doesn't matter
  1080. r.type
  1081. for a in schema_order_jit_arguments
  1082. for r in cpp.argument(
  1083. a,
  1084. method=False,
  1085. cpp_no_default_args=set(),
  1086. faithful=False,
  1087. symint=False,
  1088. has_tensor_options=False,
  1089. )
  1090. ]
  1091. # legacy, report ints
  1092. cpp_returns = cpp.returns_type(f.func.returns, symint=False).cpp_type()
  1093. schema_order_cpp_signature = f"{cpp_returns} ({', '.join(cpp_schema_order_types)})"
  1094. is_factory_method = (
  1095. any(isinstance(a.argument, TensorOptionsArguments) for a in cpp_args)
  1096. and Variant.method not in f.variants
  1097. )
  1098. return OrderedDict(
  1099. [
  1100. ("name", cpp.name(f.func)),
  1101. ("operator_name", str(f.func.name.name)),
  1102. ("overload_name", str(f.func.name.overload_name)),
  1103. ("manual_kernel_registration", f.manual_kernel_registration),
  1104. (
  1105. "category_override",
  1106. f.category_override if f.category_override is not None else "",
  1107. ),
  1108. ("schema_string", f"aten::{f.func}"),
  1109. ("arguments", arguments),
  1110. ("schema_order_cpp_signature", schema_order_cpp_signature),
  1111. ("schema_order_arguments", schema_order_arguments),
  1112. ("method_of", compute_method_of_yaml(f.variants)),
  1113. ("mode", "native"),
  1114. ("python_module", "" if f.python_module is None else f.python_module),
  1115. ("returns", returns),
  1116. ("inplace", f.func.name.name.inplace),
  1117. ("is_factory_method", is_factory_method),
  1118. ("abstract", f.is_abstract),
  1119. ("device_guard", f.device_guard),
  1120. ("with_gil", False),
  1121. ("deprecated", False),
  1122. ("has_math_kernel", f.has_composite_implicit_autograd_kernel),
  1123. ]
  1124. )
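# A rough sketch of the Declarations.yaml entry the function above emits for a
# single operator (field values here are illustrative, not copied from any
# real entry; the actual content comes from the parsed native_functions.yaml):
#
#   - name: abs_out
#     operator_name: abs
#     overload_name: out
#     schema_string: aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
#     arguments: [...]
#     schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &)
#     method_of: [Type, namespace]
#     mode: native
#     returns: [...]
#     is_factory_method: false
#     abstract: true
#     device_guard: true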
  1125. # See Note [Auto generated composite kernels]
  1126. def has_autogenerated_composite_kernel(f: NativeFunction) -> bool:
  1127. return (f.structured or f.structured_delegate is not None) and (
  1128. f.func.kind() == SchemaKind.functional or f.func.kind() == SchemaKind.inplace
  1129. )
  1130. @with_native_function_and_indices
  1131. def compute_registration_declarations(
  1132. f: NativeFunction, backend_indices: Dict[DispatchKey, BackendIndex]
  1133. ) -> str:
  1134. name = dispatcher.name(f.func)
  1135. returns_type = dispatcher.returns_type(
  1136. f.func.returns
  1137. ).cpp_type_registration_declarations()
  1138. args = dispatcher.arguments(f.func)
  1139. args_str = ", ".join(a.no_default().decl_registration_declarations() for a in args)
  1140. comment_data: Dict[str, str] = {
  1141. "schema": f"aten::{f.func}",
  1142. # TODO: What exactly is the semantics of the 'dispatch' field?
  1143. "dispatch": str(
  1144. {k for k, v in backend_indices.items() if v.has_kernel(f)}
  1145. != {DispatchKey.CompositeImplicitAutograd}
  1146. and {k for k, v in backend_indices.items() if v.has_kernel(f)}
  1147. != {
  1148. DispatchKey.CompositeImplicitAutograd,
  1149. DispatchKey.CompositeImplicitAutogradNestedTensor,
  1150. }
  1151. ),
  1152. "default": str(f.has_composite_kernel or has_autogenerated_composite_kernel(f)),
  1153. }
  1154. return f"""{returns_type} {name}({args_str}); // {json.dumps(comment_data)}
  1155. """
  1156. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
  1157. #
  1158. # RUN IT ALL
  1159. #
  1160. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
  1161. def get_custom_build_selector(
  1162. provided_op_registration_allowlist: Optional[List[str]],
  1163. op_selection_yaml_path: Optional[str],
  1164. ) -> SelectiveBuilder:
  1165. assert not (
  1166. provided_op_registration_allowlist is not None
  1167. and op_selection_yaml_path is not None
  1168. ), (
  1169. "Both provided_op_registration_allowlist and "
  1170. + "op_selection_yaml_path can NOT be provided at the "
  1171. + "same time."
  1172. )
  1173. op_registration_allowlist: Optional[Set[str]] = None
  1174. if provided_op_registration_allowlist is not None:
  1175. op_registration_allowlist = set(provided_op_registration_allowlist)
  1176. if op_registration_allowlist is not None:
  1177. selector = SelectiveBuilder.from_legacy_op_registration_allow_list(
  1178. op_registration_allowlist,
  1179. True,
  1180. False,
  1181. )
  1182. elif op_selection_yaml_path is not None:
  1183. selector = SelectiveBuilder.from_yaml_path(op_selection_yaml_path)
  1184. else:
  1185. selector = SelectiveBuilder.get_nop_selector()
  1186. return selector
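# Usage note (a sketch of the three outcomes encoded by the branches above):
#   - allowlist given, no YAML path -> legacy allowlist-based selector
#   - YAML path given, no allowlist -> selector parsed from the YAML file
#   - neither provided              -> nop selector that keeps every operator
# Passing both arguments trips the assertion at the top of this function.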
  1187. def get_grouped_by_view_native_functions(
  1188. native_functions: Sequence[NativeFunction],
  1189. ) -> Sequence[Union[NativeFunction, NativeFunctionsViewGroup]]:
  1190. def maybe_create_view_group(
  1191. d: Dict[Union[ViewSchemaKind, SchemaKind], NativeFunction]
  1192. ) -> List[Union[NativeFunction, NativeFunctionsViewGroup]]:
  1193. funcs: List[Union[NativeFunction, NativeFunctionsViewGroup]] = []
  1194. if ViewSchemaKind.aliasing in d:
  1195. view = d.pop(ViewSchemaKind.aliasing)
  1196. view_inplace = d.pop(ViewSchemaKind.aliasing_inplace, None)
  1197. view_copy = d.pop(SchemaKind.functional, None)
  1198. funcs.append(
  1199. NativeFunctionsViewGroup(
  1200. view=view,
  1201. view_copy=view_copy,
  1202. view_inplace=view_inplace,
  1203. )
  1204. )
  1205. # Take the remaining functions that weren't part of the view group
  1206. # and emit them separately
  1207. for func in d.values():
  1208. funcs.append(func)
  1209. return funcs
  1210. grouped_by_views: Dict[
  1211. FunctionSchema, Dict[Union[SchemaKind, ViewSchemaKind], NativeFunction]
  1212. ] = defaultdict(dict)
  1213. for f in native_functions:
  1214. schema = f.func.view_signature()
  1215. view_kind: ViewSchemaKind = f.view_schema_kind
  1216. # We need to group up ops relevant to the same "view", consisting of:
  1217. # view op (ViewSchemaKind.aliasing)
  1218. # view_inplace op (ViewSchemaKind.aliasing_inplace)
  1219. # view_copy op (SchemaKind.functional)
  1220. if view_kind == ViewSchemaKind.non_aliasing:
  1221. kind = f.func.kind()
  1222. assert kind not in grouped_by_views[schema]
  1223. grouped_by_views[schema][kind] = f
  1224. else:
  1225. assert view_kind not in grouped_by_views[schema]
  1226. grouped_by_views[schema][view_kind] = f
  1227. return list(concatMap(maybe_create_view_group, grouped_by_views.values()))
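# Illustrative example (the op names are just a plausible instance of the
# pattern): a view op like `transpose`, its inplace form `transpose_`, and the
# non-aliasing `transpose_copy` share a view signature and get bundled into a
# single NativeFunctionsViewGroup; any remaining functions with that signature
# are emitted on their own by the loop at the end of maybe_create_view_group.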
  1228. def get_grouped_native_functions(
  1229. native_functions: Sequence[NativeFunction],
  1230. ) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
  1231. def flatten_pre_group(
  1232. d: Dict[SchemaKind, NativeFunction]
  1233. ) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
  1234. r = NativeFunctionsGroup.from_dict(d)
  1235. if r is None:
  1236. # Invariant: any NativeFunctions that are code-generated
  1237. # should have been grouped into NativeFunctionsGroup objects
  1238. assert not any("generated" in f.tags for f in d.values())
  1239. return list(d.values())
  1240. else:
  1241. return [r]
  1242. # TODO: how come ValuesView isn't a Sequence lol
  1243. pre_grouped_native_functions = pre_group_native_functions(native_functions)
  1244. return list(
  1245. concatMap(flatten_pre_group, list(pre_grouped_native_functions.values()))
  1246. )
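# Illustrative example: the functional, inplace, and out= variants of one
# operator (e.g. `add.Tensor`, `add_.Tensor`, `add.out`) are matched up into a
# single NativeFunctionsGroup; anything that cannot be grouped is passed
# through unchanged as a bare NativeFunction.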
  1247. # Return native function declarations grouped by their namespaces.
  1248. def get_native_function_declarations(
  1249. *,
  1250. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1251. backend_indices: Dict[DispatchKey, BackendIndex],
  1252. native_function_decl_gen: Callable[
  1253. [Union[NativeFunctionsGroup, NativeFunction], BackendIndex], List[str]
  1254. ] = dest.compute_native_function_declaration,
  1255. ) -> List[str]:
  1256. """
  1257. Generate kernel declarations, in `NativeFunction(s).h`.
1258. :param grouped_native_functions: a sequence of `NativeFunction` or `NativeFunctionsGroup`.
1259. :param backend_indices: kernel collections grouped by dispatch key.
1260. :param native_function_decl_gen: callable that generates the kernel declaration for each `NativeFunction`.
1261. :return: a list of strings containing all declarations, grouped by namespace and split by newline.
  1262. """
  1263. declarations: List[str] = []
  1264. ns_grouped_kernels: Dict[str, List[str]] = defaultdict(list)
  1265. newline = "\n"
  1266. for f in grouped_native_functions:
  1267. native_function_namespaces = set()
  1268. dispatch_keys = set()
  1269. for dispatch_key, backend_idx in backend_indices.items():
  1270. backend_metadata = backend_idx.get_kernel(f)
  1271. if backend_metadata:
  1272. namespace = backend_metadata.cpp_namespace
  1273. dispatch_keys.add(dispatch_key)
  1274. native_function_namespaces.add(namespace)
  1275. else:
  1276. namespace = DEFAULT_KERNEL_NAMESPACE
  1277. assert (
  1278. len(native_function_namespaces) <= 1
  1279. ), f"Codegen only supports one namespace per operator, got {native_function_namespaces} from {dispatch_keys}"
  1280. ns_grouped_kernels[namespace].extend(
  1281. native_function_decl_gen(f, backend_idx)
  1282. )
  1283. for namespace, kernels in ns_grouped_kernels.items():
  1284. ns_helper = NamespaceHelper(
  1285. namespace_str=namespace,
  1286. entity_name="",
  1287. max_level=3,
  1288. )
1289. # De-duplicate kernel names while preserving order (backends are allowed to
1290. # repeat kernel names); only generate each declaration once!
  1291. ordered_kernels = list(OrderedDict.fromkeys(kernels))
  1292. declarations.extend(
  1293. f"""
  1294. {ns_helper.prologue}
  1295. {newline.join(ordered_kernels)}
  1296. {ns_helper.epilogue}
  1297. """.split(
  1298. newline
  1299. )
  1300. )
  1301. return declarations
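# A sketch of what one namespace chunk of the returned list looks like once
# re-joined with newlines (the declaration itself is hypothetical, and the
# exact prologue/epilogue text is up to NamespaceHelper):
#
#   namespace at {
#   namespace native {
#   TORCH_API at::Tensor foo(const at::Tensor & self);
#   } // namespace native
#   } // namespace at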
  1302. def get_kernel_namespace(
  1303. *, f: Union[NativeFunction, NativeFunctionsGroup], backend_idx: BackendIndex
  1304. ) -> str:
  1305. backend_metadata = backend_idx.get_kernel(f)
  1306. assert not backend_metadata or "::native" in backend_metadata.cpp_namespace, (
  1307. f"The kernel for function {f.func.name if isinstance(f, NativeFunction) else f.functional.func.name} "
  1308. f"with dispatch key {backend_idx.dispatch_key}"
  1309. f" has a namespace {backend_metadata.cpp_namespace} and it's not ending with '::native'."
  1310. )
  1311. return (
  1312. backend_metadata.cpp_namespace if backend_metadata else DEFAULT_KERNEL_NAMESPACE
  1313. )
  1314. # Return native function definitions grouped by dispatch key and custom namespace.
1315. # Used in RegisterDispatchKey.cpp, etc.
  1316. def get_native_function_definitions(
  1317. *,
  1318. fm: FileManager,
  1319. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1320. dispatch_key: DispatchKey,
  1321. backend_idx: BackendIndex,
  1322. selector: SelectiveBuilder,
  1323. rocm: bool,
  1324. symint: bool,
  1325. skip_dispatcher_op_registration: bool,
  1326. gen_dispatch_helpers: bool,
  1327. ) -> List[str]:
  1328. definitions: List[str] = []
  1329. ns_definitions: Dict[str, List[str]] = defaultdict(list)
  1330. anonymous_definitions: Dict[str, List[str]] = defaultdict(list)
  1331. registrations: Dict[str, Dict[str, List[str]]] = defaultdict(dict)
  1332. newline = "\n"
  1333. ns_gen = dest.RegisterDispatchKey(
  1334. backend_idx,
  1335. Target.NAMESPACED_DEFINITION,
  1336. selector,
  1337. rocm=rocm,
  1338. symint=symint,
  1339. class_method_name=None,
  1340. skip_dispatcher_op_registration=skip_dispatcher_op_registration,
  1341. )
  1342. anonymous_gen = dest.RegisterDispatchKey(
  1343. backend_idx,
  1344. Target.ANONYMOUS_DEFINITION,
  1345. selector,
  1346. rocm=rocm,
  1347. symint=symint,
  1348. class_method_name=None,
  1349. skip_dispatcher_op_registration=skip_dispatcher_op_registration,
  1350. )
  1351. reg_gen = dest.RegisterDispatchKey(
  1352. backend_idx,
  1353. Target.REGISTRATION,
  1354. selector,
  1355. rocm=rocm,
  1356. symint=symint,
  1357. class_method_name=None,
  1358. skip_dispatcher_op_registration=skip_dispatcher_op_registration,
  1359. )
  1360. for f in grouped_native_functions:
  1361. kernel_namespace = get_kernel_namespace(f=f, backend_idx=backend_idx).replace(
  1362. "::native", ""
  1363. )
  1364. ns_definitions[kernel_namespace].extend(
  1365. ns_gen(f),
  1366. )
  1367. anonymous_definitions[kernel_namespace].extend(
  1368. anonymous_gen(f),
  1369. )
  1370. namespace = (
  1371. f.namespace if isinstance(f, NativeFunction) else f.functional.namespace
  1372. )
  1373. if namespace not in registrations[kernel_namespace]:
  1374. registrations[kernel_namespace] = defaultdict(list)
  1375. registrations[kernel_namespace][namespace].extend(
  1376. reg_gen(f),
  1377. )
  1378. for kernel_namespace in ns_definitions:
  1379. if len(ns_definitions[kernel_namespace]) == 0:
  1380. continue
  1381. ns_helper = NamespaceHelper(namespace_str=kernel_namespace)
  1382. registration_body = ""
  1383. for namespace in registrations[kernel_namespace]:
  1384. if not registrations[kernel_namespace][namespace]:
  1385. continue
  1386. registration_body += f"""
  1387. TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{
  1388. {newline.join(registrations[kernel_namespace][namespace])}
  1389. }};"""
  1390. definitions.extend(
  1391. fm.substitute_with_template(
  1392. "RegisterDispatchDefinitions.ini",
  1393. lambda: {
  1394. "ns_prologue": ns_helper.prologue,
  1395. "ns_epilogue": ns_helper.epilogue,
  1396. "dispatch_helpers": dest.gen_registration_helpers(backend_idx)
  1397. if gen_dispatch_helpers
  1398. else [],
  1399. "dispatch_anonymous_definitions": anonymous_definitions[
  1400. kernel_namespace
  1401. ],
  1402. "static_init_dispatch_registrations": ""
  1403. if skip_dispatcher_op_registration
  1404. else registration_body,
  1405. "deferred_dispatch_registrations": "",
  1406. "dispatch_namespace": dispatch_key.lower(),
  1407. "dispatch_namespaced_definitions": ns_definitions[kernel_namespace],
  1408. },
  1409. ).split(newline)
  1410. )
  1411. return definitions
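# For orientation, each non-empty registration_body assembled above turns into
# roughly the following in the emitted .cpp file (operator and wrapper names
# are hypothetical):
#
#   TORCH_LIBRARY_IMPL(aten, CPU, m) {
#       m.impl("foo", TORCH_FN(wrapper_CPU_foo));
#   };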
  1412. # Return native function declarations grouped by dispatch key and custom namespace.
1413. # Used in CPUFunctions_inl.h, etc.
  1414. def get_namespaced_declaration(
  1415. *,
  1416. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1417. dispatch_key: DispatchKey,
  1418. backend_idx: BackendIndex,
  1419. selector: SelectiveBuilder,
  1420. rocm: bool,
  1421. symint: bool,
  1422. ) -> List[str]:
  1423. declarations: List[str] = []
  1424. ns_grouped_kernels: Dict[str, List[str]] = defaultdict(list)
  1425. newline = "\n"
  1426. func = dest.RegisterDispatchKey(
  1427. backend_idx,
  1428. Target.NAMESPACED_DECLARATION,
  1429. selector,
  1430. rocm=rocm,
  1431. class_method_name=None,
  1432. skip_dispatcher_op_registration=False,
  1433. symint=symint,
  1434. )
  1435. for f in grouped_native_functions:
  1436. namespace = get_kernel_namespace(f=f, backend_idx=backend_idx).replace(
  1437. "native", dispatch_key.lower()
  1438. )
  1439. ns_grouped_kernels[namespace].extend(
  1440. func(f),
  1441. )
  1442. for namespace, kernels in ns_grouped_kernels.items():
  1443. if len(kernels) == 0:
  1444. continue
  1445. ns_helper = NamespaceHelper(
  1446. namespace_str=namespace, entity_name="", max_level=3
  1447. )
  1448. ordered_kernels = list(OrderedDict.fromkeys(kernels))
  1449. declarations.extend(
  1450. f"""
  1451. {ns_helper.prologue}
  1452. {newline.join(ordered_kernels)}
  1453. {ns_helper.epilogue}
  1454. """.split(
  1455. newline
  1456. )
  1457. )
  1458. return declarations
  1459. # Return native function schema registration code for aten and other namespaces.
  1460. def get_native_function_schema_registrations(
  1461. *,
  1462. native_functions: Sequence[NativeFunction],
  1463. schema_selector: SelectiveBuilder,
  1464. ) -> Tuple[List[str], str]:
  1465. ns_native_functions: Dict[str, List[NativeFunction]] = defaultdict(list)
  1466. for native_function in native_functions:
  1467. ns_native_functions[native_function.namespace].append(native_function)
  1468. schema_registrations = ""
  1469. aten_schema_registrations = []
  1470. custom_namespace = None
  1471. for namespace, funcs in ns_native_functions.items():
  1472. schema_registrations_body = list(
  1473. mapMaybe(RegisterSchema(schema_selector), funcs)
  1474. )
  1475. # NB: we have to separate aten namespace registration from other namespaces,
  1476. # because in the template we hardcoded an operator for ATen already.
  1477. if namespace == "aten":
  1478. aten_schema_registrations = schema_registrations_body
  1479. else:
  1480. custom_namespace = namespace
  1481. tab = "\t"
1482. # If the namespace is predefined, we should define a library fragment
1483. # instead of a new library
  1484. torch_library_macro = (
  1485. "TORCH_LIBRARY_FRAGMENT"
  1486. if namespace in FRAGMENT_NAMESPACES
  1487. else "TORCH_LIBRARY"
  1488. )
  1489. schema_registrations += f"""
  1490. {torch_library_macro}({custom_namespace}, m) {{
  1491. {tab.join(schema_registrations_body)}
  1492. }};"""
  1493. return (aten_schema_registrations, schema_registrations)
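# The second element of the returned tuple is a string of blocks roughly like
# the sketch below, one per non-aten namespace (namespace and schema are
# hypothetical); aten registrations are returned separately because the
# template already handles the aten namespace, per the NB above:
#
#   TORCH_LIBRARY_FRAGMENT(custom, m) {
#       m.def("my_op(Tensor self) -> Tensor");
#   };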
  1494. def gen_aggregated_headers(
  1495. *,
  1496. native_functions: Sequence[NativeFunction],
  1497. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1498. structured_native_functions: Sequence[NativeFunctionsGroup],
  1499. static_dispatch_idx: List[BackendIndex],
  1500. selector: SelectiveBuilder,
  1501. backend_indices: Dict[DispatchKey, BackendIndex],
  1502. cpu_fm: FileManager,
  1503. cuda_fm: FileManager,
  1504. functions_keys: Set[DispatchKey],
  1505. dispatch_keys: Sequence[DispatchKey],
  1506. rocm: bool,
  1507. ) -> None:
  1508. # Buck doesn't support dynamic output files, so we aggregate all operator
  1509. # headers into a single file
  1510. cpu_fm.write(
  1511. "NativeMetaFunctions.h",
  1512. lambda: {
  1513. "NativeMetaFunctions_includes": [],
  1514. "NativeMetaFunctions_declarations": list(
  1515. mapMaybe(compute_meta_function_declaration, structured_native_functions)
  1516. ),
  1517. },
  1518. )
  1519. method_native_functions = [
  1520. fn for fn in native_functions if Variant.method in fn.variants
  1521. ]
  1522. non_method_native_functions = [
  1523. fn for fn in native_functions if fn not in method_native_functions
  1524. ]
  1525. cpu_fm.write(
  1526. "MethodOperators.h",
  1527. lambda: {
  1528. "MethodOperators_includes": [],
  1529. "MethodOperators_declarations": list(
  1530. mapMaybe(
  1531. ComputeOperators(
  1532. Target.DECLARATION,
  1533. static_dispatch_backend_indices=static_dispatch_idx,
  1534. ),
  1535. method_native_functions,
  1536. )
  1537. ),
  1538. },
  1539. )
  1540. cpu_fm.write(
  1541. "Operators.h",
  1542. lambda: {
  1543. "Operators_includes": ["#include <ATen/MethodOperators.h>"],
  1544. "Operators_declarations": list(
  1545. mapMaybe(
  1546. ComputeOperators(
  1547. Target.DECLARATION,
  1548. static_dispatch_backend_indices=static_dispatch_idx,
  1549. ),
  1550. non_method_native_functions,
  1551. )
  1552. ),
  1553. },
  1554. )
  1555. cpu_fm.write(
  1556. "Functions.h",
  1557. lambda: {
  1558. "static_dispatch_extra_headers": static_dispatch_extra_headers(
  1559. static_dispatch_idx
  1560. ),
  1561. "Functions_includes": ["#include <ATen/Operators.h>"],
  1562. "Functions_declarations": list(
  1563. mapMaybe(
  1564. ComputeFunction(),
  1565. native_functions,
  1566. )
  1567. ),
  1568. },
  1569. )
  1570. declarations = get_native_function_declarations(
  1571. grouped_native_functions=grouped_native_functions,
  1572. backend_indices=backend_indices,
  1573. )
  1574. cpu_fm.write(
  1575. "NativeFunctions.h",
  1576. lambda: {
  1577. "NativeFunctions_includes": ["#include <ATen/NativeMetaFunctions.h>"],
  1578. "NativeFunctions_declarations": declarations,
  1579. },
  1580. )
  1581. for dispatch_key in dispatch_keys:
  1582. fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
  1583. if dispatch_key in functions_keys:
  1584. inl_headers = f"#include <ATen/{dispatch_key}Functions_inl.h>"
  1585. fm.write_with_template(
  1586. f"{dispatch_key}Functions.h",
  1587. "DispatchKeyFunctions.h",
  1588. lambda: {
  1589. "dispatch_key": str(dispatch_key),
  1590. "inline_headers": inl_headers,
  1591. },
  1592. )
  1593. fm.write_with_template(
  1594. f"{dispatch_key}Functions_inl.h",
  1595. "DispatchKeyFunctions_inl.h",
  1596. lambda: {
  1597. "DispatchKeyFunctions_inl_includes": [],
  1598. "dispatch_namespace": dispatch_key.lower(),
  1599. "dispatch_namespaced_declarations": get_namespaced_declaration(
  1600. grouped_native_functions=grouped_native_functions,
  1601. dispatch_key=dispatch_key,
  1602. backend_idx=backend_indices[dispatch_key],
  1603. selector=selector,
  1604. rocm=rocm,
  1605. symint=True,
  1606. ),
  1607. },
  1608. )
  1609. del fm
  1610. def gen_per_operator_headers(
  1611. *,
  1612. native_functions: Sequence[NativeFunction],
  1613. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1614. static_dispatch_idx: List[BackendIndex],
  1615. selector: SelectiveBuilder,
  1616. backend_indices: Dict[DispatchKey, BackendIndex],
  1617. cpu_fm: FileManager,
  1618. cuda_fm: FileManager,
  1619. ops_fm: FileManager,
  1620. functions_keys: Set[DispatchKey],
  1621. dispatch_keys: Sequence[DispatchKey],
  1622. rocm: bool,
  1623. ) -> None:
  1624. # For CMake builds, split operator declarations into separate headers in
  1625. # the ATen/ops folder to split up header dependencies
  1626. functions_by_root_name: Dict[str, List[NativeFunction]] = defaultdict(lambda: [])
  1627. for fn in native_functions:
  1628. functions_by_root_name[fn.root_name].append(fn)
  1629. grouped_functions_by_root_name: Dict[
  1630. str, List[Union[NativeFunction, NativeFunctionsGroup]]
  1631. ] = defaultdict(lambda: [])
  1632. for group in grouped_native_functions:
  1633. name = group.root_name
  1634. grouped_functions_by_root_name[name].append(group)
  1635. for name, functions in functions_by_root_name.items():
  1636. ops_fm.write_with_template(
  1637. f"{name}_ops.h",
  1638. "Operator.h",
  1639. lambda: {
  1640. "declarations": list(
  1641. mapMaybe(
  1642. ComputeOperators(
  1643. Target.DECLARATION,
  1644. static_dispatch_backend_indices=static_dispatch_idx,
  1645. ),
  1646. functions,
  1647. )
  1648. ),
  1649. },
  1650. )
  1651. ops_fm.write_with_template(
  1652. f"{name}.h",
  1653. "Function.h",
  1654. lambda: {
  1655. "static_dispatch_ops_headers": list(
  1656. mapMaybe(
  1657. lambda fn: static_dispatch_ops_header(
  1658. fn, backend_index=static_dispatch_idx
  1659. ),
  1660. functions,
  1661. )
  1662. ),
  1663. "operator_includes": f"#include <ATen/ops/{name}_ops.h>",
  1664. "function_definitions": list(
  1665. mapMaybe(
  1666. ComputeFunction(),
  1667. functions,
  1668. )
  1669. ),
  1670. },
  1671. )
  1672. grouped_functions = grouped_functions_by_root_name.get(name, [])
  1673. structured_functions = [
  1674. fn
  1675. for fn in grouped_functions
  1676. if isinstance(fn, NativeFunctionsGroup) and fn.structured
  1677. ]
  1678. is_structured = len(structured_functions) > 0
  1679. if is_structured:
  1680. ops_fm.write_with_template(
  1681. f"{name}_meta.h",
  1682. "NativeMetaFunction.h",
  1683. lambda: {
  1684. "meta_function_declarations": list(
  1685. mapMaybe(
  1686. compute_meta_function_declaration, structured_functions
  1687. )
  1688. ),
  1689. },
  1690. )
  1691. declarations = get_native_function_declarations(
  1692. grouped_native_functions=grouped_functions,
  1693. backend_indices=backend_indices,
  1694. native_function_decl_gen=dest.compute_native_function_declaration,
  1695. )
  1696. ops_fm.write_with_template(
  1697. f"{name}_native.h",
  1698. "NativeFunction.h",
  1699. lambda: {
  1700. "extra_includes": (
  1701. f"#include <ATen/ops/{name}_meta.h>" if is_structured else []
  1702. ),
  1703. "native_function_declarations": declarations,
  1704. },
  1705. )
  1706. for category, suffix in [
  1707. ("Functions", ""),
  1708. ("Operators", "_ops"),
  1709. ("NativeMetaFunctions", "_meta"),
  1710. ("NativeFunctions", "_native"),
  1711. ]:
  1712. cpu_fm.write(
  1713. f"{category}.h",
  1714. lambda: {
  1715. f"{category}_includes": [
  1716. f"#include <ATen/ops/{name}{suffix}.h>"
  1717. for name in sorted(functions_by_root_name.keys())
  1718. ],
  1719. f"{category}_declarations": [],
  1720. },
  1721. )
  1722. for dispatch_key in dispatch_keys:
  1723. if dispatch_key not in functions_keys:
  1724. continue
  1725. dispatch_namespace = dispatch_key.lower()
  1726. dispatch_names = []
  1727. for name, functions in functions_by_root_name.items():
  1728. grouped_functions = grouped_functions_by_root_name.get(name, [])
  1729. declarations = list(
  1730. concatMap(
  1731. dest.RegisterDispatchKey(
  1732. backend_indices[dispatch_key],
  1733. Target.NAMESPACED_DECLARATION,
  1734. selector,
  1735. rocm=rocm,
  1736. symint=True,
  1737. class_method_name=None,
  1738. skip_dispatcher_op_registration=False,
  1739. ),
  1740. grouped_functions,
  1741. )
  1742. )
  1743. if len(declarations) == 0:
  1744. continue
  1745. dispatch_names.append(name)
  1746. ops_fm.write_with_template(
  1747. f"{name}_{dispatch_namespace}_dispatch.h",
  1748. "DispatchKeyFunction.h",
  1749. lambda: {
  1750. "dispatch_namespace": dispatch_namespace,
  1751. "dispatch_namespaced_declarations": declarations,
  1752. },
  1753. )
  1754. fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
  1755. inl_headers = f"#include <ATen/{dispatch_key}Functions_inl.h>"
  1756. fm.write_with_template(
  1757. f"{dispatch_key}Functions.h",
  1758. "DispatchKeyFunctions.h",
  1759. lambda: {
  1760. "dispatch_key": str(dispatch_key),
  1761. "inline_headers": inl_headers,
  1762. },
  1763. )
  1764. fm.write_with_template(
  1765. f"{dispatch_key}Functions_inl.h",
  1766. "DispatchKeyFunctions_inl.h",
  1767. lambda: {
  1768. "dispatch_namespace": dispatch_namespace,
  1769. "DispatchKeyFunctions_inl_includes": [
  1770. f"#include <ATen/ops/{name}_{dispatch_namespace}_dispatch.h>"
  1771. for name in sorted(dispatch_names)
  1772. ],
  1773. "dispatch_namespaced_declarations": [],
  1774. },
  1775. )
  1776. del fm
  1777. cpu_fm.write(
  1778. "MethodOperators.h",
  1779. lambda: {
  1780. "MethodOperators_includes": sorted(
  1781. f"#include <ATen/ops/{name}_ops.h>"
  1782. for name, functions in functions_by_root_name.items()
  1783. if any(Variant.method in fn.variants for fn in functions)
  1784. ),
  1785. "MethodOperators_declarations": [],
  1786. },
  1787. )
  1788. def gen_headers(
  1789. *,
  1790. native_functions: Sequence[NativeFunction],
  1791. valid_tags: Set[str],
  1792. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1793. structured_native_functions: Sequence[NativeFunctionsGroup],
  1794. static_dispatch_idx: List[BackendIndex],
  1795. selector: SelectiveBuilder,
  1796. backend_indices: Dict[DispatchKey, BackendIndex],
  1797. core_fm: FileManager,
  1798. cpu_fm: FileManager,
  1799. cuda_fm: FileManager,
  1800. ops_fm: FileManager,
  1801. dispatch_keys: Sequence[DispatchKey],
  1802. functions_keys: Set[DispatchKey],
  1803. rocm: bool,
  1804. per_operator_headers: bool,
  1805. ) -> None:
  1806. if per_operator_headers:
  1807. gen_per_operator_headers(
  1808. native_functions=native_functions,
  1809. grouped_native_functions=grouped_native_functions,
  1810. static_dispatch_idx=static_dispatch_idx,
  1811. selector=selector,
  1812. backend_indices=backend_indices,
  1813. cpu_fm=cpu_fm,
  1814. cuda_fm=cuda_fm,
  1815. ops_fm=ops_fm,
  1816. dispatch_keys=dispatch_keys,
  1817. functions_keys=functions_keys,
  1818. rocm=rocm,
  1819. )
  1820. else:
  1821. gen_aggregated_headers(
  1822. native_functions=native_functions,
  1823. grouped_native_functions=grouped_native_functions,
  1824. structured_native_functions=structured_native_functions,
  1825. static_dispatch_idx=static_dispatch_idx,
  1826. selector=selector,
  1827. backend_indices=backend_indices,
  1828. cpu_fm=cpu_fm,
  1829. cuda_fm=cuda_fm,
  1830. dispatch_keys=dispatch_keys,
  1831. functions_keys=functions_keys,
  1832. rocm=rocm,
  1833. )
  1834. core_fm.write(
  1835. "TensorBody.h",
  1836. lambda: {
  1837. "tensor_method_declarations": list(
  1838. mapMaybe(
  1839. ComputeTensorMethod(
  1840. target=Target.DECLARATION,
  1841. static_dispatch_backend_indices=static_dispatch_idx,
  1842. ),
  1843. native_functions,
  1844. )
  1845. ),
  1846. "tensor_method_definitions": list(
  1847. mapMaybe(
  1848. ComputeTensorMethod(
  1849. target=Target.DEFINITION,
  1850. static_dispatch_backend_indices=static_dispatch_idx,
  1851. ),
  1852. native_functions,
  1853. )
  1854. ),
  1855. },
  1856. )
  1857. cpu_fm.write(
  1858. "RedispatchFunctions.h",
  1859. lambda: {
  1860. "function_redispatch_definitions": list(
  1861. mapMaybe(ComputeRedispatchFunction(), native_functions)
  1862. ),
  1863. },
  1864. )
  1865. cpu_fm.write(
  1866. "RegistrationDeclarations.h",
  1867. lambda: {
  1868. "registration_declarations": [
  1869. compute_registration_declarations(f, backend_indices)
  1870. for f in native_functions
  1871. ],
  1872. },
  1873. )
  1874. cpu_fm.write(
  1875. "VmapGeneratedPlumbing.h", lambda: gen_all_vmap_plumbing(native_functions)
  1876. )
  1877. def gen_aten_interned_strings() -> Dict[str, str]:
  1878. attrs = set() # All function argument names
  1879. names = set() # All ATen function names
  1880. for func in native_functions:
  1881. names.add(str(func.func.name.name))
  1882. # Some operators don't have a functional variant but we still create a
  1883. # symbol without the underscore
  1884. names.add(func.func.name.name.base)
  1885. for arg in func.func.schema_order_arguments():
  1886. attrs.add(arg.name)
  1887. # These are keywords in C++, so aren't valid symbol names
  1888. # https://en.cppreference.com/w/cpp/language/operator_alternative
  1889. names -= {
  1890. "and",
  1891. "and_eq",
  1892. "bitand",
  1893. "bitor",
  1894. "compl",
  1895. "not",
  1896. "not_eq",
  1897. "or",
  1898. "or_eq",
  1899. "xor",
  1900. "xor_eq",
  1901. }
  1902. return {
  1903. "aten_symbols": " \\\n".join(
  1904. [f"_(aten, {name})" for name in sorted(names)]
  1905. ),
  1906. "attr_symbols": " \\\n".join(
  1907. [f"_(attr, {name})" for name in sorted(attrs)]
  1908. ),
  1909. }
  1910. core_fm.write("aten_interned_strings.h", gen_aten_interned_strings)
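# The two joined strings above expand, in aten_interned_strings.h, into
# X-macro style lines of roughly this shape (symbol names are illustrative):
#
#   _(aten, abs) \
#   _(aten, add) \
#   ...
#   _(attr, alpha) \
#   _(attr, dim)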
  1911. def gen_tags_enum() -> Dict[str, str]:
  1912. return {"enum_of_valid_tags": (",\n".join(sorted(valid_tags)))}
  1913. core_fm.write("enum_tag.h", gen_tags_enum)
  1914. def gen_source_files(
  1915. *,
  1916. native_functions: Sequence[NativeFunction],
  1917. grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
  1918. structured_native_functions: Sequence[NativeFunctionsGroup],
  1919. view_groups: Sequence[NativeFunctionsViewGroup],
  1920. selector: SelectiveBuilder,
  1921. static_dispatch_idx: List[BackendIndex],
  1922. backend_indices: Dict[DispatchKey, BackendIndex],
  1923. core_fm: FileManager,
  1924. cpu_fm: FileManager,
  1925. cpu_vec_fm: FileManager,
  1926. cuda_fm: FileManager,
  1927. dispatch_keys: Sequence[DispatchKey],
  1928. functions_keys: Set[DispatchKey],
  1929. rocm: bool,
  1930. force_schema_registration: bool,
  1931. per_operator_headers: bool,
  1932. skip_dispatcher_op_registration: bool,
  1933. ) -> None:
  1934. extra_cuda_headers = """\
  1935. #include <c10/cuda/CUDAGuard.h>
  1936. #include <ATen/cuda/ATenCUDAGeneral.h>
  1937. #include <ATen/cuda/CUDADevice.h>
  1938. #include <ATen/cuda/CUDAContext.h>"""
  1939. if rocm:
  1940. extra_cuda_headers = """\
  1941. #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
  1942. #include <ATen/hip/ATenHIPGeneral.h>
  1943. #include <ATen/hip/HIPDevice.h>
  1944. #include <ATen/hip/HIPContext.h>"""
  1945. for dispatch_key in dispatch_keys:
  1946. fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
  1947. if per_operator_headers:
  1948. def operator_headers() -> List[str]:
  1949. headers = []
  1950. for g in grouped_native_functions:
  1951. is_registered = False
  1952. if backend_index.has_kernel(g):
  1953. is_registered = True
  1954. # The above has_kernel test on a group will only test for
  1955. # the existence of out dispatch, because that's how
  1956. # structured kernels work. But sometimes functions can be
  1957. # grouped but not be structured, and then you need to check
  1958. # each individual piece, as they may have manual dispatch
  1959. # entries.
  1960. elif isinstance(g, NativeFunctionsGroup) and any(
  1961. backend_index.has_kernel(fn) for fn in g.functions()
  1962. ):
  1963. is_registered = True
  1964. # TODO: this condition is a bit questionable
1965. # (It has to do with the fact that structured kernels get kernels generated
1966. # for the Meta + CompositeExplicitAutogradNonFunctional keys).
  1967. elif g.structured and dispatch_key in (
  1968. DispatchKey.Meta,
  1969. DispatchKey.CompositeExplicitAutogradNonFunctional,
  1970. ):
  1971. is_registered = True
  1972. if not is_registered:
  1973. continue
  1974. headers.append(f"#include <ATen/ops/{g.root_name}_native.h>")
  1975. if (
  1976. dispatch_key
  1977. == DispatchKey.CompositeExplicitAutogradNonFunctional
  1978. ):
  1979. headers.append(f"#include <ATen/ops/{g.root_name}.h>")
  1980. if dispatch_key in functions_keys:
  1981. headers.append(
  1982. f"#include <ATen/ops/{g.root_name}_{dispatch_namespace}_dispatch.h>"
  1983. )
  1984. return sorted(set(headers))
  1985. else:
  1986. def operator_headers() -> List[str]:
  1987. headers = ["#include <ATen/NativeFunctions.h>"]
  1988. if dispatch_key == DispatchKey.CompositeExplicitAutogradNonFunctional:
  1989. headers.append("#include <ATen/Functions.h>")
  1990. if dispatch_key in functions_keys:
  1991. headers.append(f"#include <ATen/{dispatch_key!s}Functions.h>")
  1992. return headers
  1993. backend_index = backend_indices[dispatch_key]
  1994. ns_grouped_native_functions = defaultdict(list)
  1995. for grouped_native_function in grouped_native_functions:
  1996. namespace = (
  1997. grouped_native_function.namespace
  1998. if isinstance(grouped_native_function, NativeFunction)
  1999. else grouped_native_function.functional.namespace
  2000. )
  2001. ns_grouped_native_functions[namespace].append(grouped_native_function)
  2002. dispatch_namespace = str(dispatch_key).lower()
2003. # CompositeImplicitAutogradNestedTensor does not currently use the generated helpers;
2004. # compilation will fail when the `-Werror=unused-function` flag is set
  2005. gen_dispatch_helpers: bool = (
  2006. dispatch_key != DispatchKey.CompositeImplicitAutogradNestedTensor
  2007. )
  2008. dispatch_definitions = get_native_function_definitions(
  2009. fm=fm,
  2010. grouped_native_functions=grouped_native_functions,
  2011. dispatch_key=dispatch_key,
  2012. backend_idx=backend_index,
  2013. selector=selector,
  2014. rocm=rocm,
  2015. symint=True,
  2016. skip_dispatcher_op_registration=skip_dispatcher_op_registration,
  2017. gen_dispatch_helpers=gen_dispatch_helpers,
  2018. )
  2019. fm.write_with_template(
  2020. f"Register{dispatch_key}.cpp",
  2021. "RegisterDispatchKey.cpp",
  2022. lambda: {
  2023. "extra_cuda_headers": extra_cuda_headers
  2024. if is_cuda_dispatch_key(dispatch_key)
  2025. else "",
  2026. "external_backend_headers": "",
  2027. "dispatch_headers": dest.gen_registration_headers(
  2028. backend_index, per_operator_headers, rocm
  2029. ),
  2030. "ops_headers": operator_headers(),
  2031. "dispatch_helpers": "",
  2032. "dispatch_definitions": dispatch_definitions,
  2033. },
  2034. )
  2035. for g in structured_native_functions:
  2036. if not g.out.ufunc_inner_loop or not is_ufunc_dispatch_key(dispatch_key):
  2037. continue
  2038. name = g.functional.func.name.name
  2039. if dispatch_key is DispatchKey.CPU:
  2040. assert fm is cpu_fm
  2041. fm.write_with_template(
  2042. f"UfuncCPU_{name}.cpp",
  2043. "UfuncCPU.cpp",
  2044. lambda: {
  2045. "meta_declaration": compute_meta_function_declaration(g),
  2046. "native_declaration": dest.compute_native_function_declaration(
  2047. g, backend_indices[dispatch_key]
  2048. ),
  2049. "native_definitions": dest.compute_ufunc_cpu(g),
  2050. },
  2051. )
  2052. cpu_vec_fm.write_with_template(
  2053. f"UfuncCPUKernel_{name}.cpp",
  2054. "UfuncCPUKernel.cpp",
  2055. lambda: {
  2056. "name": name,
  2057. "native_definitions": dest.compute_ufunc_cpu_kernel(g),
  2058. },
  2059. )
  2060. elif dispatch_key is DispatchKey.CUDA:
  2061. cuda_headers = "#include <ATen/native/cuda/Loops.cuh>"
  2062. if rocm:
  2063. cuda_headers = "#include <ATen/native/hip/Loops.cuh>"
  2064. fm.write_with_template(
  2065. f"UfuncCUDA_{name}.cu",
  2066. "UfuncCUDA.cu",
  2067. lambda: {
  2068. "name": name,
  2069. "cuda_headers": cuda_headers,
  2070. "meta_declaration": compute_meta_function_declaration(g),
  2071. "native_declaration": dest.compute_native_function_declaration(
  2072. g, backend_indices[dispatch_key]
  2073. ),
  2074. "native_definitions": dest.compute_ufunc_cuda(g),
  2075. },
  2076. )
  2077. else:
  2078. raise AssertionError(f"unrecognized {dispatch_key} for ufunc")
  2079. del fm
  2080. # BackendSelect is generated specially
  2081. def gen_backend_select() -> Dict[str, List[str]]:
  2082. relevant_fns = [
  2083. fn for fn in native_functions if needs_backend_select(fn, selector)
  2084. ]
  2085. return {
  2086. "ops_headers": [
  2087. f"#include <ATen/ops/{fn.root_name}_ops.h>" for fn in relevant_fns
  2088. ],
  2089. "backend_select_method_definitions": list(
  2090. mapMaybe(
  2091. ComputeBackendSelect(Target.DEFINITION, selector), relevant_fns
  2092. )
  2093. ),
  2094. "backend_select_function_registrations": list(
  2095. mapMaybe(
  2096. ComputeBackendSelect(Target.REGISTRATION, selector), relevant_fns
  2097. )
  2098. ),
  2099. }
  2100. cpu_fm.write("RegisterBackendSelect.cpp", gen_backend_select)
  2101. schema_selector = selector
  2102. if force_schema_registration:
  2103. schema_selector = SelectiveBuilder.get_nop_selector()
  2104. (
  2105. aten_schema_registrations,
  2106. schema_registrations,
  2107. ) = get_native_function_schema_registrations(
  2108. native_functions=native_functions, schema_selector=schema_selector
  2109. )
  2110. cpu_fm.write(
  2111. "RegisterSchema.cpp",
  2112. lambda: {
  2113. "aten_schema_registrations": []
  2114. if skip_dispatcher_op_registration
  2115. else aten_schema_registrations,
  2116. "schema_registrations": []
  2117. if skip_dispatcher_op_registration
  2118. else schema_registrations,
  2119. },
  2120. )
  2121. def key_func(
  2122. fn: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
  2123. ) -> str:
  2124. return fn.root_name
  2125. cpu_fm.write_sharded(
  2126. "Operators.cpp",
  2127. native_functions,
  2128. key_fn=key_func,
  2129. env_callable=lambda fn: {
  2130. "operator_headers": [f"#include <ATen/ops/{fn.root_name}.h>"],
  2131. "definitions": [
  2132. ComputeOperators(
  2133. Target.DEFINITION,
  2134. static_dispatch_backend_indices=static_dispatch_idx,
  2135. )(fn)
  2136. ],
  2137. },
  2138. base_env={
  2139. "static_dispatch_extra_headers": static_dispatch_extra_headers(
  2140. static_dispatch_idx
  2141. ),
  2142. },
  2143. num_shards=5,
  2144. sharded_keys={
  2145. "operator_headers",
  2146. "definitions",
  2147. "static_dispatch_extra_headers",
  2148. },
  2149. )
  2150. cpu_fm.write("Functions.cpp", lambda: {})
  2151. core_fm.write("TensorMethods.cpp", lambda: {})
  2152. core_fm.write(
  2153. "ATenOpList.cpp",
  2154. lambda: {
  2155. "aten_ops": list(mapMaybe(compute_aten_op, native_functions)),
  2156. },
  2157. )
  2158. def functionalization_env_callable(
  2159. g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
  2160. ) -> Dict[str, List[str]]:
  2161. def gen_op_headers(
  2162. g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
  2163. ) -> List[str]:
  2164. if isinstance(g, NativeFunctionsViewGroup):
  2165. # view ops always get a functionalization kernel
  2166. headers = [
  2167. f"#include <ATen/ops/{g.view.root_name}_native.h>",
  2168. f"#include <ATen/ops/{g.view.root_name}_ops.h>",
  2169. ]
  2170. if g.view_copy is not None:
  2171. headers += [
  2172. f"#include <ATen/ops/{g.view_copy.root_name}_native.h>",
  2173. f"#include <ATen/ops/{g.view_copy.root_name}_ops.h>",
  2174. ]
  2175. return headers
  2176. elif isinstance(g, NativeFunctionsGroup):
  2177. headers = [
  2178. f"#include <ATen/ops/{g.functional.root_name}_native.h>",
  2179. f"#include <ATen/ops/{g.functional.root_name}_ops.h>",
  2180. f"#include <ATen/ops/{g.out.root_name}_native.h>",
  2181. f"#include <ATen/ops/{g.out.root_name}_ops.h>",
  2182. ]
  2183. if g.inplace is not None:
  2184. headers += [
  2185. f"#include <ATen/ops/{g.inplace.root_name}_native.h>",
  2186. f"#include <ATen/ops/{g.inplace.root_name}_ops.h>",
  2187. ]
  2188. if g.mutable is not None:
  2189. headers += [
  2190. f"#include <ATen/ops/{g.mutable.root_name}_native.h>",
  2191. f"#include <ATen/ops/{g.mutable.root_name}_ops.h>",
  2192. ]
  2193. return headers
  2194. else:
  2195. return [
  2196. f"#include <ATen/ops/{g.root_name}_native.h>",
  2197. f"#include <ATen/ops/{g.root_name}_ops.h>",
  2198. ]
  2199. return {
  2200. "ops_headers": gen_op_headers(g),
  2201. "func_definitions": gen_functionalization_definition(
  2202. selector,
  2203. g,
  2204. ),
  2205. "func_registrations": gen_functionalization_registration(
  2206. selector,
  2207. g,
  2208. backend_indices[DispatchKey.CompositeImplicitAutograd],
  2209. ),
  2210. }
  2211. all_groups: List[
  2212. Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
  2213. ] = list(structured_native_functions) + list(
  2214. view_groups # type: ignore[assignment, arg-type, operator]
  2215. )
  2216. # Note: all operators that functionalization needs to handle (mutable and aliasing ops) should be grouped properly.
  2217. # The only reason we really need to deal with direct NativeFunctions here (instead of the groups) is because:
  2218. # (1) We can provide better error checking (error out if someone introduces a mutable op that doesn't obey the grouping logic)
  2219. # (2) functionalization needs to manually register CompositeImplicitAutograd kernels, which might not be grouped.
  2220. # Although this could go away long-term if we add a dedicated dispatch key for decompositions.
  2221. structured_map: Dict[OperatorName, NativeFunction] = {
  2222. f.func.name: f
  2223. for f in concatMap(lambda g: list(g.functions()), structured_native_functions)
  2224. }
  2225. view_map: Dict[OperatorName, NativeFunction] = {
  2226. f.func.name: f for f in concatMap(lambda g: list(g.functions()), view_groups)
  2227. }
  2228. for f in native_functions:
  2229. if f.func.name not in structured_map and f.func.name not in view_map:
  2230. all_groups.append(f)
  2231. cpu_fm.write_sharded(
  2232. "RegisterFunctionalization.cpp",
  2233. all_groups,
  2234. key_fn=key_func,
  2235. env_callable=functionalization_env_callable,
  2236. num_shards=4,
  2237. sharded_keys={
  2238. "ops_headers",
  2239. "func_definitions",
  2240. "func_registrations",
  2241. "func_add_back_views_definitions",
  2242. "func_add_back_views_registrations",
  2243. },
  2244. )
  2245. cpu_fm.write(
  2246. "FunctionalInverses.h",
  2247. lambda: {
  2248. "view_inverse_declarations": list(
  2249. mapMaybe(
  2250. lambda g: gen_functionalization_view_inverse_declaration(
  2251. selector, g
  2252. ),
  2253. view_groups,
  2254. )
  2255. )
  2256. },
  2257. )
  2258. # Note [view_copy NativeFunctions]
  2259. # Every view operator in native_functions.yaml that is not CompositeImplicitAutograd
  2260. # needs to have a corresponding non-aliasing {view}_copy variant.
  2261. # Backends that use functionalization and don't know how to handle aliasing ops
  2262. # are expected to implement kernels for these {view}_copy kernels instead.
  2263. # The code for {view}_copy operators in core is pretty boilerplate-heavy however,
  2264. # so we codegen the following:
  2265. # (1) A CompositeExplicitAutogradNonFunctional kernel for every {view}_copy operator.
  2266. # These are never explicitly invoked by the functionalization pass,
  2267. # but they could theoretically be called from user code (I added these kernels for completeness,
  2268. # since the ops are part of the public API).
  2269. # (2) A derivative formula for every {view}_copy operator
  2270. # {view}_copy operators can re-use the same derivative formulas as their {view} op counterparts,
  2271. # so rather than stamping all of the entries out in derivatives.yaml,
  2272. # we codegen them in.
  2273. # This is similar to how autograd codegen doesn't require inplace ops to have a derivatives.yaml entry.
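# As one illustrative instance of the pattern described above: a view op such
# as `slice.Tensor` gets a non-aliasing `slice_copy` counterpart that returns
# a freshly allocated tensor instead of an alias, and its derivative formula
# is reused from `slice` rather than spelled out again in derivatives.yaml.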
  2274. cpu_fm.write(
  2275. "CompositeViewCopyKernels.cpp",
  2276. lambda: {
  2277. "ops_headers": [
  2278. "\n".join(
  2279. f"#include <ATen/ops/{f.root_name}_ops.h>\n"
  2280. # NB: this include is important as it ensures we
  2281. # set the visibility on generated view_copy kernels
  2282. # correctly
  2283. f"#include <ATen/ops/{f.root_name}_native.h>"
  2284. for f in (
  2285. [g.view] if g.view_copy is None else [g.view, g.view_copy]
  2286. )
  2287. )
  2288. for g in view_groups
  2289. ]
  2290. + [
  2291. "\n".join(
  2292. f"#include <ATen/ops/{f.root_name}_ops.h>"
  2293. for f in [g.inplace, g.mutable, g.functional]
  2294. if f is not None and "generated" not in f.tags
  2295. )
  2296. for g in structured_native_functions
  2297. ],
  2298. "CompositeViewCopyKernel_Definitions": list(
  2299. mapMaybe(
  2300. GenCompositeViewCopyKernel(
  2301. backend_indices[
  2302. DispatchKey.CompositeExplicitAutogradNonFunctional
  2303. ]
  2304. ),
  2305. view_groups,
  2306. )
  2307. ),
  2308. "GeneratedCompositeFunctional_Definitions": list(
  2309. mapMaybe(
  2310. gen_composite_functional_kernel,
  2311. structured_native_functions,
  2312. )
  2313. ),
  2314. "GeneratedCompositeOut_Definitions": list(
  2315. mapMaybe(
  2316. gen_composite_out_kernel,
  2317. structured_native_functions,
  2318. )
  2319. ),
  2320. },
  2321. )
  2322. def gen_declarations_yaml(
  2323. cpu_fm: FileManager, native_functions: Sequence[NativeFunction]
  2324. ) -> None:
  2325. cpu_fm.write(
  2326. "Declarations.yaml",
  2327. lambda: format_yaml([compute_declaration_yaml(f) for f in native_functions]),
  2328. )
  2329. def get_torchgen_root() -> pathlib.Path:
  2330. """
  2331. If you're depending on torchgen out-of-tree, you can use the root to figure
  2332. out the path to native_functions.yaml
  2333. """
  2334. return pathlib.Path(__file__).parent.resolve()
  2335. def main() -> None:
  2336. parser = argparse.ArgumentParser(description="Generate ATen source files")
  2337. parser.add_argument(
  2338. "-s",
  2339. "--source-path",
  2340. help="path to source directory for ATen",
  2341. default="aten/src/ATen",
  2342. )
  2343. parser.add_argument(
  2344. "-o",
  2345. "--output-dependencies",
  2346. help="output a list of dependencies into the given file and exit",
  2347. )
  2348. parser.add_argument(
  2349. "--dry-run",
  2350. action="store_true",
  2351. help="run without writing any files (still updates outputs)",
  2352. )
  2353. parser.add_argument(
  2354. "--per-operator-headers",
  2355. action="store_true",
  2356. help="generate separate headers per operator in ATen/ops",
  2357. )
  2358. parser.add_argument(
  2359. "-d",
  2360. "--install-dir",
  2361. "--install_dir",
  2362. help="output directory",
  2363. default="build/aten/src/ATen",
  2364. )
  2365. parser.add_argument(
  2366. "--rocm",
  2367. action="store_true",
  2368. help="reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly",
  2369. )
  2370. parser.add_argument(
  2371. "--mps",
  2372. action="store_true",
  2373. help="Generate MPS registration code when set",
  2374. )
  2375. # TODO: --op-registration-whitelist will be removed when all call-sites
  2376. # for gen.py are moved over to using the operator YAML file for mobile
  2377. # custom build.
  2378. parser.add_argument(
  2379. "--op-registration-whitelist",
  2380. "--op_registration_whitelist",
  2381. nargs="*",
  2382. help="filter op registrations by the whitelist (if set); "
  2383. "each item is `namespace`::`operator name` without overload name; "
  2384. "e.g.: aten::empty aten::conv2d ...",
  2385. )
  2386. parser.add_argument(
  2387. "--op-selection-yaml-path",
  2388. "--op_selection_yaml_path",
  2389. help="Provide a path to the operator selection (for custom build) YAML "
  2390. "that contains the information about the set of selected operators "
  2391. "and their categories (training, ...). Each operator is either a "
  2392. "full operator name with overload or just a bare operator name. "
  2393. "The operator names also contain the namespace prefix (e.g. aten::)",
  2394. )
  2395. parser.add_argument(
  2396. "--backend-whitelist",
  2397. "--backend_whitelist",
  2398. nargs="*",
  2399. help="filter dispatch backend by the whitelist (if set), "
  2400. "e.g.: CPU CUDA QuantizedCPU ...",
  2401. )
  2402. parser.add_argument(
  2403. "--static-dispatch-backend",
  2404. "--static_dispatch_backend",
  2405. nargs="*",
  2406. help="generate static dispatch code for the specific backend (if set)",
  2407. )
  2408. parser.add_argument(
  2409. "--skip-dispatcher-op-registration",
  2410. "--skip_dispatcher_op_registration",
  2411. action="store_true",
  2412. help="Avoid registering operators into the dispatcher.",
  2413. )
  2414. parser.add_argument(
  2415. "--force-schema-registration",
  2416. "--force_schema_registration",
  2417. action="store_true",
  2418. help="force it to generate schema-only registrations for all ops, including"
  2419. "those that are not listed on --op-registration-whitelist",
  2420. )
  2421. parser.add_argument(
  2422. "--generate",
  2423. type=str,
  2424. nargs="*",
  2425. choices=["headers", "sources", "declarations_yaml"],
  2426. default=["headers", "sources", "declarations_yaml"],
  2427. help="Generate only a subset of files",
  2428. )
  2429. options = parser.parse_args()
  2430. selector = get_custom_build_selector(
  2431. options.op_registration_whitelist,
  2432. options.op_selection_yaml_path,
  2433. )
  2434. native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
  2435. tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
  2436. from torchgen.model import dispatch_keys
  2437. # TODO: stop generating CUDA kernels for non-CUDA builds
  2438. ignore_keys = set()
  2439. if not options.mps:
  2440. ignore_keys.add(DispatchKey.MPS)
  2441. if DispatchKey.MPS in dispatch_keys:
  2442. del dispatch_keys[dispatch_keys.index(DispatchKey.MPS)]
  2443. parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path, ignore_keys)
  2444. valid_tags = _GLOBAL_PARSE_TAGS_YAML_CACHE[tags_yaml_path]
  2445. native_functions, backend_indices = (
  2446. parsed_yaml.native_functions,
  2447. parsed_yaml.backend_indices,
  2448. )
  2449. grouped_native_functions = get_grouped_native_functions(native_functions)
  2450. structured_native_functions = [
  2451. g for g in grouped_native_functions if isinstance(g, NativeFunctionsGroup)
  2452. ]
  2453. native_functions_with_view_groups = get_grouped_by_view_native_functions(
  2454. native_functions
  2455. )
  2456. view_groups = [
  2457. g
  2458. for g in native_functions_with_view_groups
  2459. if isinstance(g, NativeFunctionsViewGroup)
  2460. ]
  2461. # NB: It is mandatory to NOT use os.path.join here, as the install directory
  2462. # will eventually be ingested by cmake, which does not respect Windows style
  2463. # path slashes. If you switch this to use os.path.join, you'll get an error
  2464. # like:
  2465. #
  2466. # Syntax error in cmake code when parsing string
  2467. #
  2468. # C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
  2469. #
  2470. # Invalid character escape '\c'.
  2471. core_install_dir = f"{options.install_dir}/core"
  2472. pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)
  2473. ops_install_dir = f"{options.install_dir}/ops"
  2474. pathlib.Path(ops_install_dir).mkdir(parents=True, exist_ok=True)
  2475. core_fm = make_file_manager(options=options, install_dir=core_install_dir)
  2476. cpu_fm = make_file_manager(options=options)
  2477. cpu_vec_fm = make_file_manager(options=options)
  2478. cuda_fm = make_file_manager(options=options)
  2479. ops_fm = make_file_manager(options=options, install_dir=ops_install_dir)
  2480. # Only a limited set of dispatch keys get CPUFunctions.h headers generated
  2481. # for them; this is the set
  2482. functions_keys = {
  2483. DispatchKey.CPU,
  2484. DispatchKey.CUDA,
  2485. DispatchKey.CompositeImplicitAutograd,
  2486. DispatchKey.CompositeImplicitAutogradNestedTensor,
  2487. DispatchKey.CompositeExplicitAutograd,
  2488. DispatchKey.CompositeExplicitAutogradNonFunctional,
  2489. DispatchKey.Meta,
  2490. }
  2491. if options.mps:
  2492. functions_keys.add(DispatchKey.MPS)
  2493. if options.backend_whitelist:
  2494. dispatch_keys = [
  2495. k
  2496. for k in dispatch_keys
  2497. if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist
  2498. ]
  2499. static_dispatch_idx: List[BackendIndex] = []
  2500. if options.static_dispatch_backend:
  2501. static_dispatch_idx = [
  2502. backend_indices[DispatchKey.parse(key)]
  2503. for key in options.static_dispatch_backend
  2504. ]
  2505. for key in options.static_dispatch_backend:
  2506. dp_key = DispatchKey.parse(key)
  2507. if dp_key not in functions_keys:
  2508. functions_keys.add(dp_key)
  2509. if "sources" in options.generate:
  2510. gen_source_files(
  2511. native_functions=native_functions,
  2512. grouped_native_functions=grouped_native_functions,
  2513. structured_native_functions=structured_native_functions,
  2514. view_groups=view_groups,
  2515. selector=selector,
  2516. static_dispatch_idx=static_dispatch_idx,
  2517. backend_indices=backend_indices,
  2518. core_fm=core_fm,
  2519. cpu_fm=cpu_fm,
  2520. cpu_vec_fm=cpu_vec_fm,
  2521. cuda_fm=cuda_fm,
  2522. dispatch_keys=dispatch_keys,
  2523. functions_keys=functions_keys,
  2524. rocm=options.rocm,
  2525. force_schema_registration=options.force_schema_registration,
  2526. per_operator_headers=options.per_operator_headers,
  2527. skip_dispatcher_op_registration=options.skip_dispatcher_op_registration,
  2528. )
  2529. if "headers" in options.generate:
  2530. gen_headers(
  2531. native_functions=native_functions,
  2532. valid_tags=valid_tags,
  2533. grouped_native_functions=grouped_native_functions,
  2534. structured_native_functions=structured_native_functions,
  2535. static_dispatch_idx=static_dispatch_idx,
  2536. selector=selector,
  2537. backend_indices=backend_indices,
  2538. core_fm=core_fm,
  2539. cpu_fm=cpu_fm,
  2540. cuda_fm=cuda_fm,
  2541. ops_fm=ops_fm,
  2542. dispatch_keys=dispatch_keys,
  2543. functions_keys=functions_keys,
  2544. rocm=options.rocm,
  2545. per_operator_headers=options.per_operator_headers,
  2546. )
  2547. if "declarations_yaml" in options.generate:
  2548. gen_declarations_yaml(native_functions=native_functions, cpu_fm=cpu_fm)
  2549. if options.output_dependencies:
  2550. depfile_path = pathlib.Path(options.output_dependencies).resolve()
  2551. depfile_name = depfile_path.name
  2552. depfile_stem = depfile_path.stem
  2553. for fm, prefix in [
  2554. (cpu_fm, ""),
  2555. (cpu_vec_fm, "cpu_vec_"),
  2556. (core_fm, "core_"),
  2557. (cuda_fm, "cuda_"),
  2558. (ops_fm, "ops_"),
  2559. ]:
  2560. varname = prefix + depfile_stem
  2561. path = depfile_path.parent / (prefix + depfile_name)
  2562. fm.write_outputs(varname, str(path))
  2563. if __name__ == "__main__":
  2564. main()