native_function_generation.py 28 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630
  1. from collections import defaultdict
  2. from typing import Dict, List, Optional, Sequence, Tuple, Union
  3. import torchgen.api.dispatcher as dispatcher
  4. from torchgen.api.translate import translate
  5. from torchgen.api.types import Binding, DispatcherSignature, Expr
  6. from torchgen.context import with_native_function
  7. from torchgen.model import (
  8. Annotation,
  9. Argument,
  10. BackendIndex,
  11. BackendMetadata,
  12. BaseOperatorName,
  13. BaseTy,
  14. BaseType,
  15. DEFAULT_KERNEL_NAMESPACE,
  16. DeviceCheckType,
  17. DispatchKey,
  18. FunctionSchema,
  19. NativeFunction,
  20. NativeFunctionsGroup,
  21. OperatorName,
  22. Return,
  23. SchemaKind,
  24. Variant,
  25. )
  26. from torchgen.utils import concatMap
# See Note: [Out ops with functional variants that don't get grouped properly]
# Allow-list of out= ops with no other visible variant; add_generated_native_functions
# raises on any out=-only op NOT in this list.
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
    # This has a functional variant, but it's currently marked private.
    # This function should be marked private as well (*_backward ops aren't exposed to python anyway).
    "adaptive_avg_pool3d_backward.grad_input",
    # There's a functional variant, _slow_conv2d_backward.output_mask, that isn't grouped properly.
    # Maybe we can kill this operator in favor of convolution_backward?
    "_slow_conv2d_backward.grad_input",
]

# See Note: [Mutable ops that cannot get an out variant]
# Mutable ops with no tensor-like returns, so no out= variant can be synthesized for them.
MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
    # should be out=?
    "_cummax_helper",
    # should be out=?
    "_cummin_helper",
]

# All of these operators don't have any tensor like returns
FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
    "_assert_async",  # no return
    "_dimI",  # returns an int
    "_dimV",  # returns an int
    "_has_same_storage_numel",  # returns a boolean
    "_linalg_check_errors",  # no return
    "_local_scalar_dense",  # returns a Scalar
    "_nested_tensor_from_mask_left_aligned",  # returns a boolean
    "_nnz",  # returns an int
    "_use_cudnn_ctc_loss",  # returns a boolean
    "_use_cudnn_ctc_loss.Tensor",  # returns a boolean
    "_validate_compressed_sparse_indices",  # no return
    "allclose",  # returns a boolean
    "dense_dim",  # returns an int
    "equal",  # returns a boolean
    "is_coalesced",  # returns a boolean
    "is_pinned",  # returns a boolean
    "is_same_size",  # returns a boolean
    "is_set_to",  # returns a boolean
    "q_per_channel_axis",  # returns an int
    "q_scale",  # returns a float
    "q_zero_point",  # returns an int
    "qscheme",  # returns a QScheme
    "record_stream",  # no return
    "sparse_dim",  # returns an int
    "_nested_tensor_offsets",  # returns a vector of ints
    "_chunk_grad_outputs_efficient_attention",  # returns a bool
    "_fused_sdp_choice",  # returns an int
]

# Inplace ops whose schemas don't line up with their functional/out= siblings,
# so variant generation is skipped for them entirely.
INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
    # polygamma and polygamma.out both exist, but have a
    # pre-self arg (while polygamma_ does not)
    # We should either fix this schema so it can be grouped properly,
    # or allow the codegen to generate new functional/out= NativeFunctions for this op
    # (which would require changing its overload name to prevent overload ambiguity).
    "polygamma_"
]
  81. # Groups "similar" NativeFunctions together
  82. # example add.Tensor, add_.Tensor, add.out
  83. # "similar" NativeFunctions are all expected to have an identical `signature()`,
  84. # But have differing SchemaKinds.
  85. def pre_group_native_functions(
  86. native_functions: Sequence[NativeFunction],
  87. ) -> Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]:
  88. pre_grouped_native_functions: Dict[
  89. FunctionSchema, Dict[SchemaKind, NativeFunction]
  90. ] = defaultdict(dict)
  91. for f in native_functions:
  92. d = pre_grouped_native_functions[f.func.signature()]
  93. assert f.func.kind() not in d
  94. d[f.func.kind()] = f
  95. return pre_grouped_native_functions
  96. # Returns the out variant overload name given a base function overload name
  97. def get_expected_out_variant_overload_name(overload_name: Optional[str]) -> str:
  98. return "out" if not overload_name else f"{overload_name}_out"
# Helper function: given an inplace FunctionSchema, generate its corresponding out= variant
# Example before:
#   _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
# Example after:
#   _add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out)
def self_to_out_signature(func: FunctionSchema) -> FunctionSchema:
    # Generating an out= schema from an inplace schema.
    assert func.kind() == SchemaKind.inplace
    assert func.arguments.self_arg is not None
    # The new out= schema has:
    # - a new out argument with the same type as "func" (but with a mutable annotation)
    # - The returns (if any) now alias the out= argument instead of "func"
    # - an "out" overload name
    return FunctionSchema(
        # Drop the trailing underscore from the base name and append "_out"
        # (or plain "out") to the overload name.
        name=func.name.remove_inplace().with_overload(
            get_expected_out_variant_overload_name(func.name.overload_name)
        ),
        # `self` loses its alias annotation; a new kwarg-only `out` argument
        # inherits both the type and the original (mutable) annotation of `self`.
        arguments=func.arguments.remove_self_annotation().with_out_args(
            [
                Argument(
                    name="out",
                    type=func.arguments.self_arg.argument.type,
                    default=None,
                    annotation=func.arguments.self_arg.argument.annotation,
                )
            ]
        ),
        returns=func.returns,
    )
# Helper function: given a functional FunctionSchema, generate its corresponding out= variant
# Example before:
#   _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None,
#       bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
# Example after:
#   _to_copy._out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None,
#       Tensor(a!) out) -> Tensor(a!)
def functional_to_out_signature(func: FunctionSchema) -> FunctionSchema:
    # Generating an out= schema from a functional schema.
    assert func.kind() == SchemaKind.functional
    # Synthesize one out= argument per tensor-like return, plus the matching
    # (possibly re-annotated) returns.
    new_returns, new_out_args = generate_out_args_from_schema(func)
    # The new out= schema has:
    # - one or more new out argument(s) with the same type as returns (but with a mutable annotation)
    # - The returns now alias the out= arguments
    # - an "_out" overload name
    return FunctionSchema(
        name=func.name.with_overload(
            get_expected_out_variant_overload_name(func.name.overload_name)
        ),
        arguments=func.arguments.signature().with_out_args(
            new_out_args,
        ),
        returns=tuple(new_returns),
    )
  152. # Helper function: given a function schema, generate corresponding out arguments, also the updated return annotations.
  153. def generate_out_args_from_schema(
  154. func: FunctionSchema,
  155. ) -> Tuple[List[Return], List[Argument]]:
  156. # More of a sanity check - our existing restrictions on schemas should enforce that
  157. # mutable schema kinds never return their mutable arguments.
  158. assert not any(
  159. r.annotation is not None and r.annotation.is_write for r in func.returns
  160. )
  161. tensorlike_rets = [r for r in func.returns if r.type.is_tensor_like()]
  162. assert len(tensorlike_rets) > 0
  163. used_annotations = concatMap(
  164. lambda a: [] if a.annotation is None else a.annotation.alias_set,
  165. func.arguments.flat_all,
  166. )
  167. valid_annotations = [
  168. x for x in "abcdefghijklmnopqrstuvwxyz" if x not in used_annotations
  169. ]
  170. all_rets_are_tensors = all(r.type == BaseType(BaseTy.Tensor) for r in func.returns)
  171. new_out_args: List[Argument] = []
  172. # The end result of new_returns is that:
  173. # - If every return is a plain tensor, then the new returns == the old returns, but with the out= alias annotations added.
  174. # - Otherwise, none of the out arguments show up in the returns (and we're only left with non-tensor-like returns, if any).
  175. new_returns: List[Return] = []
  176. for (i, r) in enumerate(func.returns):
  177. if r.type.is_tensor_like():
  178. new_out = Argument(
  179. name="out" if len(func.returns) == 1 else f"out{i}",
  180. type=r.type,
  181. default=None,
  182. annotation=Annotation.parse(f"{valid_annotations[i]}!"),
  183. )
  184. new_out_args.append(new_out)
  185. if all_rets_are_tensors:
  186. # The convention for out= schemas is that they only return their out arguments
  187. # if the return is a plain Tensor (or if it's a tuple of plain Tensors)
  188. new_ret = Return(
  189. name=None, type=new_out.type, annotation=new_out.annotation
  190. )
  191. new_returns.append(new_ret)
  192. else:
  193. new_returns.append(r)
  194. return new_returns, new_out_args
# Helper function: given a mutable FunctionSchema, generate its corresponding out= variant
# Example before:
#   _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)  # noqa: B950
# Example after:
#   _fused_moving_avg_obs_fq_helper._out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))  # noqa: B950
def mutable_to_out_signature(func: FunctionSchema) -> FunctionSchema:
    # Generating an out= schema from a mutable schema.
    assert func.kind() == SchemaKind.mutable
    # The new out= schema has:
    # - Any non-aliased tensor-like returns are converted to mutable, aliased out= arguments
    #   (if the argument is a tensor then we also return it for method chaining,
    #   otherwise we return nothing)
    # - an "out" overload name
    #
    # Note that:
    # (1) This also means that we can *only* generate an out= variant from a mutable schema
    #     if the mutable schema has at least one tensor-like non-aliasing return.
    # (2) The generated out= variant still has mutable positional arguments,
    #     but if necessary we could probably add another out= variant that also
    #     functionalizes the mutable arguments (a functional_out variant)
    new_returns, new_out_args = generate_out_args_from_schema(func)
    return FunctionSchema(
        name=func.name.remove_inplace().with_overload(
            get_expected_out_variant_overload_name(func.name.overload_name)
        ),
        # Unlike the inplace case, the original mutable positional args are kept
        # as-is; only the new out= kwargs are appended.
        arguments=func.arguments.with_out_args(new_out_args),
        returns=tuple(new_returns),
    )
# This function, given function of one SchemaKind, as well as a target SchemaKind,
# generates a new NativeFunction with the same properties, but using the target SchemaKind.
# We only actually generate functions for either functional or out= SchemaKinds.
# This function returns a tuple, with:
# - The generated NativeFunction
# - a dictionary of `BackendIndex` objects, describing which dispatch keys
#   we will generate kernels for, for the new NativeFunction.
#   Details are in the function, but we only generate composite kernels (in some cases) today.
def generate_function(
    f: NativeFunction, k: SchemaKind
) -> Tuple[NativeFunction, Dict[DispatchKey, Dict["OperatorName", "BackendMetadata"]]]:
    from torchgen.api import cpp

    if k == SchemaKind.functional:
        assert f.func.kind() != SchemaKind.functional
        # The new "functional" NativeFunction has:
        # - any mutable arguments have been converted into (immutable) returns.
        #   (if a mutable argument was not also a return, it gets converted to one)
        # - "_functional" appended to the base name, ONLY IF this op has a mutable variant.
        #   See Note [Overload Ambiguity With Functional Variants]
        # The default grouping logic in signature() actually already does this,
        # so we can piggy-back off it (but we still want return names)
        func = f.func.signature(keep_return_names=True).with_name(
            OperatorName(
                name=BaseOperatorName(
                    base=f.func.name.name.base,
                    inplace=False,
                    dunder_method=f.func.name.name.dunder_method,
                    # See Note [Overload Ambiguity With Functional Variants]
                    functional_overload=f.func.kind() == SchemaKind.mutable,
                ),
                overload_name=f.func.name.overload_name,
            )
        )
    elif k == SchemaKind.out:
        # We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
        # but at least today, there is no good reason to actually use them.
        # we'll generate a dispatcher entry for them, but won't actually register any kernels for them.
        if f.func.kind() == SchemaKind.inplace:
            func = self_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.mutable:
            func = mutable_to_out_signature(f.func)
        elif f.func.kind() == SchemaKind.functional:
            func = functional_to_out_signature(f.func)
        else:
            raise AssertionError(
                "We only bother generating out= functions from either inplace or mutable or functional variants"
            )
    else:
        raise AssertionError(
            "We currently only generate either functional or out= NativeFunctions"
        )

    # Generated kernel naming convention for out: <op_name>_<overload_name>. The reason for this is to
    # disambiguate operator with the same name but different overload name, e.g., `randn.names_out` and
    # `randn.generator_with_names_out`.
    kernel_name = (
        func.name.unambiguous_name()
        if func.kind() == SchemaKind.out
        else cpp.name(func)
    )
    # SymInt-taking ops get a distinct "_symint" kernel entry point.
    if f.func.has_symint():
        kernel_name += "_symint"
    # All generated ops are registered only under CompositeExplicitAutograd.
    backend_metadata = {
        DispatchKey.CompositeExplicitAutograd: {
            func.name: BackendMetadata(
                kernel=kernel_name,
                structured=False,
                cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
            )
        }
    }
    # Propagate only the tags that matter for generated variants.
    tags = {"generated"} | set(f.tags & {"nondeterministic_seeded", "view_copy"})

    return (
        NativeFunction(
            func=func,
            use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
            # These generated fn's aren't meant to be user friendly- don't generate methods.
            variants={Variant.function},
            structured=False,
            structured_delegate=None,
            structured_inherits=None,
            precomputed=None,
            autogen=[],
            ufunc_inner_loop={},
            manual_kernel_registration=False,
            manual_cpp_binding=False,
            python_module=None,
            category_override=None,
            device_guard=False,
            device_check=DeviceCheckType.NoCheck,
            loc=f.loc,
            cpp_no_default_args=set(),
            is_abstract=f.is_abstract,
            has_composite_implicit_autograd_kernel=False,
            has_composite_implicit_autograd_nested_tensor_kernel=False,
            has_composite_explicit_autograd_kernel=True,
            has_composite_explicit_autograd_non_functional_kernel=False,
            # Every generated NativeFunction gets a "generated" tag, so it's easy to tell
            # which NativeFunction objects did not come directly from native_functions.yaml.
            tags=tags,
            namespace=f.namespace,
        ),
        backend_metadata,
    )
# This function is responsible for adding generated NativeFunctions which don't appear
# explicitly in the codegen.
# You can inspect the full list of NativeFunctions yourself with the torchgen package, by running
# torchgen.parse_native_yaml("aten/src/ATen/native/native_functions.yaml", "aten/src/ATen/native/tags.yaml")
# (Maybe we should make a friendly API for this)
#
# Note: this function *mutates* its two inputs,
# adding the new NativeFunctions / BackendMetadata to them
def add_generated_native_functions(
    rs: List[NativeFunction],
    indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]],
) -> None:
    # The main code for generating new NativeFunctions
    # First we group NativeFunctions by schema kind,
    # then we detect which ones are missing and generate them.
    pre_grouped_native_functions = pre_group_native_functions(rs)
    for k, d in pre_grouped_native_functions.items():
        has_functional = SchemaKind.functional in d
        has_inplace = SchemaKind.inplace in d
        has_mutable = SchemaKind.mutable in d
        has_out = SchemaKind.out in d

        # We automatically generate a few native functions that don't exist in the yaml, for a few reasons:
        # (1) If an operator has an inplace/out= variant but no functional variant, we can generate
        #     a simple functional variant that the functionalization pass can consume.
        # (2) If an operator has an inplace or functional but no out= variant, we generate an out=
        #     variant, mostly so we can easily pair up functions into NativeFunctionsGroup,
        #     while maintaining the constraint that the out= variant is "required".
        if has_mutable or has_inplace or has_out or has_functional:
            # Don't bother generating functions trio's for native functions that bypass the dispatcher.
            are_manual = all(f.manual_cpp_binding for f in d.values())
            # Don't bother generating functional + out= variants for view operators
            has_view_ops = any(f.is_view_op for f in d.values())
            # Don't generate the other variants for CompositeImplicitAutograd operators.
            # We could probably do this, but the main benefit of generating the function triplets
            # is for transforms that need them, and transforms don't need to act directly
            # on CompositeImplicitAutograd operators (since we let them decompose).
            are_composite_implicit = all(
                f.has_composite_implicit_autograd_kernel for f in d.values()
            )
            if are_manual or has_view_ops or are_composite_implicit:
                continue
            if has_out and len(d.values()) == 1:
                # Note: [Out ops with functional variants that don't get grouped properly]
                # In theory we could validly have an out= operator in native_functions.yaml
                # that has no other variants.
                # But today, all of the operators where that's the case actually do have
                # functional variants, that we are just unable to pair up properly.
                # I think banning this all together is probably safer
                # (you can always add a functional variant yourself if you want to add a new out= operator).
                #
                # We should probably fix the existing cases; this check is to prevent us from adding more over time.
                if (
                    str(d[SchemaKind.out].func.name)
                    not in OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
                ):
                    raise AssertionError(
                        f"Found an out= operator that we could not find any other variants of: {str(d[SchemaKind.out].func)}"
                    )
                continue

            # Some inplace ops that have problematic schemas (that we should fix), which prevent us
            # from generating out= and functional variants
            if (
                has_inplace
                and str(d[SchemaKind.inplace].func.name)
                in INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
            ):
                continue

            # Pick the "best" existing variant to generate from, preferring
            # inplace > mutable > out > functional.
            base_fn = (
                d[SchemaKind.inplace]
                if has_inplace
                else d[SchemaKind.mutable]
                if has_mutable
                else d[SchemaKind.out]
                if has_out
                else d[SchemaKind.functional]
            )

            # Note: [Mutable ops that cannot get an out variant]
            # We can only generate an out= variant if either:
            # - the original function has tensor-like returns (since we can convert them to out kwargs)
            # - or it's inplace (since we can convert `self` to an out kwarg)
            # There are only two functions that don't fit this criteria today though,
            # and they both look like they should be fixed to be out= variants,
            # so it feels safer to ban this schema all-together
            base_fn_valid = base_fn.func.kind() == SchemaKind.inplace or any(
                r.type.is_tensor_like() for r in base_fn.func.returns
            )

            # Note: [Loosen the assertion that all functional should have out variant]
            # By design all functional operators should have out variants. The needs_out check
            # is loosening this requirement, changing it to only generate out variant if there's
            # an `autogen` block in the native function, in the long run it should be removed.
            # FIXME: Remove this after figuring out CI job failures related to min, max, mean
            needs_out = any("out" in str(op_name) for op_name in base_fn.autogen)
            gets_out_variant = not has_out and base_fn_valid and needs_out
            if not has_out and not base_fn_valid:
                if (
                    str(base_fn.func.name)
                    not in MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
                    and str(base_fn.func.name)
                    not in FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
                ):
                    raise AssertionError(
                        f"""Found an operator that we could not generate an out= variant for: {str(base_fn.func)}.
This type of operators don't have tensor-like return, making it difficult to generate a proper out= variant. If
out= variant is not needed, please add the function name into FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT list."""
                    )

            # Generate an out= variant
            if gets_out_variant:
                fn, metadata = generate_function(base_fn, SchemaKind.out)
                d[SchemaKind.out] = fn
                BackendIndex.grow_index(indices, metadata)
                rs.append(fn)

            # Generate a functional variant, but only do it if the operator got an out= variant
            # (Functional variants are only useful if we can group up the variants,
            # which we can only do if they have an out= variant)
            if not has_functional and (has_out or gets_out_variant):
                fn, metadata = generate_function(base_fn, SchemaKind.functional)
                d[SchemaKind.functional] = fn
                BackendIndex.grow_index(indices, metadata)
                rs.append(fn)
  445. def return_str(rets: Tuple[Return, ...], names: List[str]) -> str:
  446. assert len(rets) == len(names)
  447. if len(rets) == 0:
  448. return ""
  449. elif len(rets) == 1:
  450. return f"return {names[0]};"
  451. else:
  452. return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
  453. # Given a function, and the name of a variable correponding to the output of that function,
  454. # gather up all of the individual returns that are not aliased
  455. def gather_nonaliased_inner_rets(func: FunctionSchema, out_var: str) -> List[str]:
  456. aliased_rets = func.aliased_return_names()
  457. non_aliased_names = []
  458. is_out_var_a_tuple = len(func.returns) > 1
  459. for (i, r) in enumerate(aliased_rets):
  460. if r is None:
  461. non_aliased_names.append(
  462. f"std::get<{i}>({out_var})" if is_out_var_a_tuple else out_var
  463. )
  464. return non_aliased_names
# Generates functional kernels in terms of their inplace.mutable counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_functional_kernel(g: NativeFunctionsGroup) -> Optional[str]:
    # We should only be generating these for code-generated NativeFunctions
    if "generated" not in g.functional.tags:
        return None
    # And we always write the kernel for a generated op in terms of a non-generated op.
    if g.inplace is not None and "generated" not in g.inplace.tags:
        target_f = g.inplace
    elif g.mutable is not None and "generated" not in g.mutable.tags:
        target_f = g.mutable
    else:
        # We should be guaranteed to have a valid inplace/mutable variant to call into.
        # See Note: [Mutable Ops Not Using Functionalization]
        raise AssertionError(str(g.functional.func))

    sig = DispatcherSignature(g.functional.func)
    target_sig = DispatcherSignature(target_f.func)

    context: List[Union[Binding, Expr]] = []
    clone_mutable_inputs = []
    cloned_return_names = []
    # We can't just directly pass all of the arguments from the functional op into the mutating op.
    # We need to check for which inputs to the mutating operator are mutable,
    # and clone those inputs first.
    for a_curr, a_tgt in zip(
        dispatcher.jit_arguments(g.functional.func),
        dispatcher.jit_arguments(target_f.func),
    ):
        if a_tgt.annotation is not None and a_tgt.annotation.is_write:
            # The target op mutates this argument: clone it first so the
            # functional kernel leaves its inputs untouched.
            clone_mutable_inputs.append(
                f"auto {a_curr.name}_clone = clone_arg({a_curr.name});"
            )
            context.append(
                Expr(
                    expr=f"{a_curr.name}_clone",
                    type=dispatcher.argument_type(a_curr, binds=a_curr.name),
                )
            )
            # Invariant: mutable arguments on the inner mutable op are always returns on the functional op.
            cloned_return_names.append(f"{a_curr.name}_clone")
        else:
            context.append(dispatcher.argument(a_curr))
    exprs = ", ".join([e.expr for e in translate(context, target_sig.arguments())])

    out_name = "output"
    maybe_assign = f"auto {out_name} = " if len(target_f.func.returns) > 0 else ""
    inner_return_names = gather_nonaliased_inner_rets(target_f.func, out_name)
    # The functional op returns the target op's non-aliased results followed by
    # the (mutated) clones of its write-annotated inputs.
    ret_str = return_str(
        g.functional.func.returns, inner_return_names + cloned_return_names
    )

    clone_mutable_inputs_str = "\n".join(clone_mutable_inputs)
    return f"""
{sig.defn(name=sig.name() + ("_symint" if g.out.func.has_symint() else ""))} {{
  {clone_mutable_inputs_str}
  {maybe_assign}at::_ops::{target_f.func.name.unambiguous_name()}::call({exprs});
  {ret_str}
}}
"""
# Generates out= kernels in terms of their functional counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_out_kernel(g: NativeFunctionsGroup) -> Optional[str]:
    # We should only be generating these for code-generated NativeFunctions
    if "generated" not in g.out.tags:
        return None
    # And we always write the kernel for the out= op in terms of the functional.
    # Note that the functional op might have also been generated, but we don't have to
    # worry about cycles, because the generated functional kernels are always implemented
    # in terms of non-generated kernels (see gen_composite_functional_kernel).
    sig = DispatcherSignature(g.out.func)
    target_sig = DispatcherSignature(g.functional.func)

    exprs = ", ".join(
        [e.expr for e in translate(sig.arguments(), target_sig.arguments())]
    )

    copy_outs = []
    out_name = "tmp_output"
    # For every out= kwarg: resize it to match the functional result, then copy
    # the corresponding functional return into it.
    for i, out_arg in enumerate(g.out.func.arguments.out):
        functional_return_name = (
            out_name
            if len(g.functional.func.returns) == 1
            else f"std::get<{i}>({out_name})"
        )
        copy_outs.append(
            f"""\
resize_out_helper({out_arg.name}, {functional_return_name});
copy_arg({out_arg.name}, {functional_return_name});"""
        )

    rets = []
    # For each return arg in the calling (out=) operator,
    # If it corresponds to an aliased input, return the input.
    # Otherwise, return the corresponding output from calling the functional operator.
    for i, ret_name in enumerate(g.out.func.aliased_return_names()):
        if ret_name is not None:
            rets.append(ret_name)
        else:
            functional_return_name = (
                out_name
                if len(g.functional.func.returns) == 1
                else f"std::get<{i}>({out_name})"
            )
            rets.append(functional_return_name)

    copy_outs_str = "\n".join(copy_outs)

    # Kernel name needs to follow the naming convention defined in `generate_function()`
    return f"""
{sig.defn(name=g.out.func.name.unambiguous_name() + ("_symint" if g.out.func.has_symint() else ""))} {{
  auto {out_name} = at::_ops::{g.functional.func.name.unambiguous_name()}::call({exprs});
  {copy_outs_str}
  {return_str(g.out.func.returns, rets)}
}}
"""