# torchgen/dest/lazy_ir.py
# Code generation for Lazy Tensor Core (LTC) IR node classes and native
# function definitions.
  1. import itertools
  2. from abc import ABC
  3. from dataclasses import dataclass
  4. from typing import Any, Dict, List, Optional, Tuple, Union
  5. import torchgen.api.dispatcher as dispatcher
  6. from torchgen.api.lazy import (
  7. getValueT,
  8. isValueType,
  9. LazyArgument,
  10. LazyIrProperties,
  11. LazyIrSchema,
  12. tensorListValueT,
  13. )
  14. from torchgen.api.translate import translate
  15. from torchgen.api.types import (
  16. BaseCType,
  17. Binding,
  18. deviceT,
  19. DispatcherSignature,
  20. kernel_signature,
  21. NativeSignature,
  22. OptionalCType,
  23. VectorCType,
  24. )
  25. from torchgen.context import method_with_native_function
  26. from torchgen.dest.lazy_ts_lowering import ts_lowering_body
  27. from torchgen.model import (
  28. Argument,
  29. BackendIndex,
  30. BackendMetadata,
  31. BaseTy,
  32. BaseType,
  33. FunctionSchema,
  34. ListType,
  35. NativeFunction,
  36. NativeFunctionsGroup,
  37. )
def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
    """
    Given a LazyArgument,
    generate a c++ string for materializing an rvalue of that arg for passing into
    a lazy Node constructor.

    The returned string is a C++ expression; the names it references
    (e.g. ``node_{arg.name}``, ``lazy_{arg.name}``) are the variables emitted
    by GenLazyNativeFuncDefinition.lazy_tensor_decls for the same argument.
    """
    # TODO: Matching on CType seems wrong; should be matching on Type
    if isValueType(arg.lazy_type):
        # Value-typed args (IR values) were pre-wrapped into lazy tensors /
        # scalar nodes by the caller; just reference the right variable.
        if isinstance(arg.lazy_type, BaseCType):
            if arg.is_wrapped_scalar:
                return f"node_{arg.name}"
            elif arg.lazy_type.type is tensorListValueT:
                return f"lazy_{arg.name}_tensorlist"
            elif arg.is_symint_or_list:
                return f"GetSymIntValue({arg.name})"
            return f"lazy_{arg.name}->GetIrValue()"
        elif isinstance(arg.lazy_type, OptionalCType):
            if arg.is_symint_or_list:
                # TODO: I don't understand when you should put lazy_ in the name
                # or not
                return f"{arg.name} ? c10::make_optional(GetSymIntValue(*{arg.name})) : c10::nullopt"
            elif arg.is_wrapped_scalar:
                return f"node_{arg.name}"
            return (
                f"lazy_{arg.name} ? "
                f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : "
                "c10::nullopt"
            )
        else:
            raise AssertionError(
                f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
            )
    else:
        # Scalar (non-value) args are materialized by copying / converting the
        # original argument.
        # NB: this is here because right now we aren't treating SymInt[] as a
        # value type; when we do this needs to move above
        # NB: we cannot test arg.lazy_type as we've already specified it is an
        # int64_t and so we cannot distinguish between SymInt and int64_t
        if isinstance(arg.orig_type, ListType) and arg.orig_type.elem == BaseType(
            BaseTy.SymInt
        ):
            if arg.symint:
                return f"GetSymIntArrayRefValue({arg.name})"
            else:
                return f"std::vector<int64_t>({arg.name}.begin(), {arg.name}.end())"
        elif isinstance(arg.lazy_type, VectorCType) and isinstance(
            arg.lazy_type.elem, BaseCType
        ):
            # e.g. IntArrayRef -> std::vector<int64_t> (owning copy)
            return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
        elif (
            isinstance(arg.lazy_type, OptionalCType)
            and isinstance(arg.lazy_type.elem, VectorCType)
            and isinstance(arg.lazy_type.elem.elem, BaseCType)
        ):
            return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
        else:
            # Fallback: pass the argument through unchanged.
            return f"{arg.name}"
  94. def node_ctor_inputs(schema: LazyIrSchema) -> str:
  95. """
  96. Produce a formatted string with the arguments as passed into the constructor of a node class.
  97. """
  98. node_ctor_values = [
  99. node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
  100. ]
  101. return ", ".join(node_ctor_values)
def gen_fallback_code(
    schema: LazyIrSchema,
    sig: Union[DispatcherSignature, NativeSignature],
    overload_name: str,
) -> str:
    """
    Generate code that falls back to eager conditioned on a predicate.

    Emits a C++ snippet that checks ``force_eager_fallback`` (and, when the op
    takes a generator, whether that generator is defined) and, if so, calls
    back into the eager kernel via ``call_fallback_fn_symint``.
    """
    dispatcher_sig = DispatcherSignature.from_schema(schema.func)
    # Translate from the kernel signature's bindings to dispatcher arguments.
    exprs = translate(sig.arguments(), dispatcher_sig.arguments())
    fallback_args = ",\n                ".join([a.expr for a in exprs])
    # ATEN_OP2 is used for overloads; ATEN_OP for the base name.
    if len(overload_name):
        aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
    else:
        aten_op_str = f"ATEN_OP({schema.aten_name})"
    or_has_generator = ""
    if schema.generator_arg:
        # generators are always optional and there is never more than one, at least currently
        or_has_generator = f" || ({schema.generator_arg.name}.has_value() && {schema.generator_arg.name}->defined())"
    return f"""
        if (force_eager_fallback({aten_symbol(schema)}){or_has_generator}) {{
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(
                {fallback_args}
            );
        }}
"""
  128. def aten_symbol(schema: LazyIrSchema) -> str:
  129. missing_interned_strings = {
  130. "sigmoid_backward",
  131. }
  132. if schema.aten_name in missing_interned_strings:
  133. return f'c10::Symbol::fromQualString("aten::{schema.aten_name}")'
  134. if not schema.aten_name.startswith("at::"):
  135. return f"at::aten::{schema.aten_name}"
  136. else:
  137. return schema.aten_name
# converts all tensor-like arguments to meta tensors. Returns:
# (1) a string containing all of the logic that does the conversions.
# (2) a context, to be used by translate(), with all of the relevant bindings.
def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
    context: List[Binding] = []
    unwrapped_tensor_args: List[str] = []
    for arg in sig.arguments():
        if isinstance(arg.argument, Argument) and arg.argument.type.is_tensor_like():
            # Tensor-like args are rebound to a "{name}_meta" variable produced
            # by the emitted to_meta(...) call.
            unwrapped_name = f"{arg.name}_meta"
            unwrapped_tensor_args.append(
                f"auto {unwrapped_name} = to_meta({arg.name});"
            )
            context.append(arg.with_name(unwrapped_name))
        else:
            # Non-tensor args pass through with their original binding.
            context.append(arg)
    unwrap_tensor_args_str = "\n        ".join(unwrapped_tensor_args)
    return unwrap_tensor_args_str, context
@dataclass(frozen=True)
class GenLazyIR(ABC):
    """Generates the C++ class declaration for a lazy IR node.

    Subclasses may override lowering_function / create_function /
    can_be_reused_function to add backend-specific members (see GenTSLazyIR).
    """

    backend_index: BackendIndex
    backend_name: str
    # Name of the C++ base class the generated node derives from.
    node_base: str
    use_lazy_shape: bool

    @method_with_native_function
    def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
        # For grouped functions, generate from the functional variant only.
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        metadata = self.backend_index.get_kernel(
            f.functional if isinstance(f, NativeFunctionsGroup) else f
        )
        schema = LazyIrSchema(
            func, symint=metadata is not None and metadata.supports_symint()
        )
        return self.gen(schema)

    # there is no lowering functionality generated unless this IR base class is subclassed and
    # implemented as a backend-specific node
    def lowering_function(self, schema: LazyIrSchema) -> str:
        return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        # No Create() member by default; backends may override.
        return ""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        # Default: nodes are never reused.
        return f"""bool CanBeReused({node_ctor_args}) const {{
    return false;
    }}"""

    def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
        """Build the C++ base-class constructor call for the generated node."""
        value_args = schema.filtered_args(values=True, scalars=False)
        # backends can customize the way the node base class constructor is called,
        # as long as all of its arguments can be generated from information available from the schema
        base_ctor_value_args_list = []
        for arg in value_args:
            if isinstance(arg.lazy_type, BaseCType) or isinstance(
                arg.lazy_type, VectorCType
            ):
                base_ctor_value_args_list.append(f"{arg.name}")
            elif isinstance(arg.lazy_type, OptionalCType):
                # Optional values fall back to kNullValue in the OpList.
                base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
            else:
                raise AssertionError(
                    f"Unsupported type ({arg.lazy_type}) - add support if necessary"
                )
        base_ctor_value_args = ", ".join(base_ctor_value_args_list)

        scalar_args = schema.filtered_args(values=False, scalars=True)

        # Shape construction.
        # Conditionally build shape depending on specified shape property
        if schema.properties.ShapePrecompute:
            # Shapes were computed by the caller and passed into the ctor.
            shape_ctor_arg = "std::move(shapes),"
        elif schema.properties.ShapeCompute:
            # Compute shapes eagerly inside the ctor call.
            shape_args = [a.name for a in value_args]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
        elif schema.properties.ShapeCache:
            # Defer shape computation to a lambda (cached by the base class).
            shape_args = [f"operand({i})" for i in range(len(value_args))]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
        else:
            shape_ctor_arg = ""

        scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)

        return f"""{self.node_base}(
              {schema.node_name}::ClassOpKind(),
              OpList{{{base_ctor_value_args}}},
              {shape_ctor_arg}
              /* num_outputs */ {len(schema.returns)},
              torch::lazy::MHash({scalar_hashes}))"""

    def gen(self, schema: LazyIrSchema) -> List[str]:
        """Return the full C++ class declaration for this schema's IR node."""
        opkind = schema.opkind or aten_symbol(schema)

        # for now, we just want one IR class decl and soon after also the method defs
        # and we use the functional version not out/inplace.
        all_args = schema.filtered_args()
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)

        ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
        # CanBeReused/Create take the args WITHOUT the trailing shapes param.
        reuse_ctor_args = ", ".join(ctor_args)
        if self.use_lazy_shape and schema.properties.ShapePrecompute:
            ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
        node_ctor_args = ", ".join(ctor_args)

        scalar_initializers = ",\n        ".join(
            [
                # This code is just special casing the mapping from string_view -> strings
                f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)"
                if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
                else f"{a.name}({a.name})"
                for a in scalar_args
            ]
        )
        if len(scalar_initializers):
            scalar_initializers = f",\n        {scalar_initializers}"
        # Member declarations; string_view members are stored as owned strings.
        scalar_decls = "\n  ".join(
            [
                f"std::string {a.name};"
                if a.lazy_type.cpp_type() == "c10::string_view"
                else f"c10::optional<std::string> {a.name};"
                if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
                else f"{a.lazy_type.cpp_type()} {a.name};"
                for a in scalar_args
            ]
        )
        optional_values = [
            arg.name
            for arg in schema.filtered_args(values=True, scalars=False)
            if isinstance(arg.lazy_type, OptionalCType)
        ]
        # One bitfield flag per optional value, recording presence at ctor time.
        has_optional_decls = "\n  ".join(
            [f"bool has_{value}: 1;" for value in optional_values]
        )
        has_optional_defs = "\n    ".join(
            [f"has_{value} = !!{value};" for value in optional_values]
        )
        # ToString() body: print each scalar member, handling optionals.
        members_to_string = []
        for arg in scalar_args:
            if isinstance(arg.lazy_type, OptionalCType):
                members_to_string.append(
                    f"""if ({arg.name}.has_value()) {{
      ss << ", {arg.name}=" << {arg.name}.value();
    }} else {{
      ss << ", {arg.name}=null";
    }}"""
                )
            else:
                members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
        members_to_string_str = "\n    ".join(members_to_string)

        return [
            f"""\
class {schema.node_name} : public {self.node_base} {{
 public:
  static torch::lazy::OpKind ClassOpKind() {{
    return torch::lazy::OpKind({opkind});
  }}

  {schema.node_name}({node_ctor_args})
      : {self.node_base_ctor_call(schema)}{scalar_initializers}
  {{
    {has_optional_defs}
  }}

  std::string ToString() const override {{
    std::stringstream ss;
    ss << {self.node_base}::ToString();
    {members_to_string_str}
    return ss.str();
  }}

  {self.create_function(schema, reuse_ctor_args)}

  {self.can_be_reused_function(schema, reuse_ctor_args)}

  {self.lowering_function(schema)}

  {scalar_decls}
  {has_optional_decls}
}};

""",
        ]
@dataclass(frozen=True)
class GenTSLazyIR(GenLazyIR):
    """TorchScript-backend specialization of GenLazyIR.

    Adds Lower(), Create() and a real CanBeReused() to the generated node,
    each gated by the schema's LazyIrProperties.
    """

    def lowering_function(self, schema: LazyIrSchema) -> str:
        signature = """
  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override"""
        if schema.properties.LowerDeclOnly:
            # Declaration only; the definition is hand-written elsewhere.
            return f"{signature};"
        elif schema.properties.Lower:
            return f"""{signature} {{
    {ts_lowering_body(schema)}
  }}
            """
        else:
            return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        signature = f"static NodePtr Create({node_ctor_args})"
        if schema.properties.CreateFnDeclOnly:
            return f"{signature};"
        elif not schema.properties.CreateFn:
            return ""
        return f"""{signature} {{
    return ReuseOrMakeNode<{schema.node_name}>(data);
  }}"""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        signature = f"bool CanBeReused({node_ctor_args}) const"
        if schema.properties.CanBeReusedDeclOnly:
            return f"{signature};"
        elif not schema.properties.CanBeReused:
            return ""
        value_comparison = []
        # Values are compared positionally against the node's operands;
        # `i` indexes operands in declaration order.
        for arg in itertools.chain(schema.positional_values, schema.keyword_values):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"nullable_operand(i++) == {arg.name}.value_or(kNullValue)"
                )
            else:
                value_comparison.append(f"operand(i++) == {arg.name}")
        # Scalars are compared against the stored members; optionals compare
        # equal when both are empty or both hold equal payloads.
        for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"((!this->{arg.name}&&!{arg.name}) || (this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
                )
            else:
                value_comparison.append(f"this->{arg.name} == {arg.name}")
        value_comparison_str = " &&\n        ".join(value_comparison)

        return f"""{signature} {{
    size_t i = 0;
    return ({value_comparison_str});
  }}"""
@dataclass(frozen=True)
class GenLazyNativeFuncDefinition:
    """Generates the C++ definition of a lazy native-function kernel.

    The generated body (see __call__) performs: optional eager fallback,
    metrics, device resolution, lazy tensor wrapping, IR node construction
    (with caching/reuse), and conversion of the result back to ATen tensors.
    The many string fields parameterize backend-specific helper names.
    """

    class_method_name: str
    backend_index: BackendIndex
    tensor_class: str
    gen_forced_fallback_code: bool
    backend_namespace: str
    get_tensorlist: str
    get_tensor_or_wrap_number: str
    try_get_tensor: str
    metrics_counter: str
    create_tensor: str
    # xla uses an instance method for tensor creation (see create_lazy_tensor)
    create_from_first_tensor: bool
    create_aten_from_ltc_tensor: str
    tuple_aten_from_ltc_tensors: str
    lazy_tensor_ptr: str
    get_device_fn: str

    def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        value_args = schema.filtered_args(values=True, scalars=False)
        # Generates lazy_{name} variables for LazyTensors wrapping input tensors
        lazy_tensor_decls: List[str] = []
        for arg in value_args:
            if arg.is_wrapped_scalar:
                # Wrapped scalars become IR scalar nodes (node_{name}).
                if isinstance(arg.lazy_type, OptionalCType):
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = {arg.name} ?
                c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
                    GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
                c10::nullopt;"""
                    )
                else:
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
                    )
            elif arg.is_symint_or_list:
                continue  # values are extracted in isValueType
            elif isinstance(arg.lazy_type, BaseCType):
                if arg.lazy_type.type is tensorListValueT:
                    lazy_tensor_decls.append(
                        f"auto lazy_{arg.name}_tensorlist = "
                        f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
                    )
                else:
                    lazy_tensor_decls.append(
                        f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                        f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
                    )
            elif isinstance(arg.lazy_type, OptionalCType):
                assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
                # TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
                # until we encounter a real world example.
                lazy_tensor_decls.append(
                    f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                    f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
                )
            else:
                raise AssertionError(
                    f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
                )
        return ("\n        ").join(lazy_tensor_decls)

    def force_eager_fallback(
        self,
        func: NativeFunction,
        schema: LazyIrSchema,
        metadata: BackendMetadata,
        sig: Union[DispatcherSignature, NativeSignature],
    ) -> str:
        # Emits the eager-fallback guard only when configured for this backend.
        if self.gen_forced_fallback_code:
            return gen_fallback_code(
                schema, sig, overload_name=func.func.name.overload_name
            )
        return ""

    def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        return f"{self.metrics_counter};"

    def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code resolving the common device from tensor args and/or an
        optional Device scalar arg."""
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        optional_device = OptionalCType(BaseCType(deviceT))
        optional_devices = [
            a.name for a in scalar_args if a.lazy_type == optional_device
        ]
        assert (
            len(value_types_names) > 0 or len(optional_devices) > 0
        ), "Expected at least one Value or Device type"
        get_device_str = (
            f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
        )
        return f"""auto common_device = {get_device_str};
        TORCH_INTERNAL_ASSERT(common_device);
        """

    def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code computing output shapes, via a meta kernel when eligible,
        otherwise via a (possibly hand-written) compute_shape_* function."""
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        all_args = schema.filtered_args()
        returns_length = len(schema.returns)
        # call the meta kernel if it exists, to compute output shape/dtype for our IR
        # Note [Generated LTC Shape Functions]
        # LTC uses meta tensors from core to do shape inference when possible, and otherwise
        # we generate a shape function declaration that needs to be manually implemented.
        # How do we detect which ops are eligible to use meta tensors?
        # In general we should be able to use meta tensors not just on structured operators,
        # but also on composite operators that are implemented in terms of structured kernels.
        # We don't currently have a way of knowing at codegen time which ops are implemented that way.
        # This is the case for all view and view_copy operators however, so we're going to
        # use them specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
        is_view_copy_op = "view_copy" in func.tags
        is_structured = func.structured or func.structured_delegate is not None
        if is_structured or is_view_copy_op:
            meta_out = """
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
            if returns_length > 1:

                def this_shape(i: int) -> str:
                    return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"

                shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
                meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"

            # Convert tensor args to the meta device and call it.
            # (We can't pass in the input tensors directly, because they are "functional wrappers".
            # If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
            # Even at::meta:: functions might redispatch, e.g. if they call into view ops.
            dispatcher_sig = DispatcherSignature.from_schema(func.func)
            meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
            meta_call_args = [
                e.expr
                for e in translate(
                    meta_call_ctx, dispatcher_sig.arguments(), method=False
                )
            ]
            if is_view_copy_op:
                # view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
                assert func.has_composite_explicit_autograd_non_functional_kernel
                dispatch_ns = "compositeexplicitautogradnonfunctional"
            else:
                dispatch_ns = "meta"
            aten_name = schema.aten_name
            # TODO: this is trolling
            if func.func.has_symint() and metadata.supports_symint():
                aten_name += "_symint"
            shape_str = f"""\
        {meta_conversion_str}
        auto out_meta = at::{dispatch_ns}::{aten_name}({', '.join(meta_call_args)});
        {meta_out}"""
        else:
            shape_sig = ComputeShapeSignature(
                metadata.kernel, func, symint=metadata.supports_symint()
            )
            shape_str = f"""
            auto shapes = {shape_sig.shape_call};"""

        shape_str += f"""
            TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""

        # Calculating which dimensions are symbolic
        func_schema_str = "aten::" + str(func.func)
        shape_str += f"""
            if(torch::lazy::symbolicShapeEnabled()){{
                std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
                const char* schema_str = "{func_schema_str}";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }}
        """
        return shape_str

    def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        # Try to reuse a cached node first; only compute shapes and build a
        # new node on a cache miss.
        node_ctor_input_str = node_ctor_inputs(schema)
        return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
        if (!node) {{
            {self.shape_inference(func, schema)}
            node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
            CacheNode(node);
        }}
        """

    def create_lazy_tensor(self, first_tensor_name: Optional[str] = None) -> str:
        # xla uses an instance method for tensor creation, for the time being
        if self.create_from_first_tensor:
            # TODO(whc) remove this if XLA switches to using static method for creation
            assert (
                first_tensor_name is not None
            ), "Requires first tensor to create lazy tensor"
            return f"{first_tensor_name}.{self.create_tensor}"
        return f"{self.backend_namespace}::{self.create_tensor}"

    def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit code converting the IR node back into the kernel's ATen return
        value (single tensor, tuple of tensors, or in-place/out reference)."""
        returns_length = len(schema.returns)
        value_args = schema.filtered_args(values=True, scalars=False)
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
        bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
                {self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""

        if returns_length > 1:
            assert (
                len(value_types_names) > 0
            ), "Code below assumes there is at least one tensor arg"
            bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
        for (int i = 0; i < {returns_length}; i++) {{
            lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
        }}
        auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""

        if schema.name.name.inplace or func.func.is_out_fn():
            assert returns_length == 1, (
                "We assumed there was no such case where an op is an in-place variant "
                f"and has tuple outputs, but got tuple of len {returns_length}."
            )
            # In-place/out variants write the IR value into the existing tensor.
            bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
        auto& result = {first_tensor_name};"""

        bridge_str += """
        return result;"""
        return bridge_str

    @method_with_native_function
    def __call__(self, func: NativeFunction) -> List[str]:
        sig = kernel_signature(func, self.backend_index)
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
        return [
            f"""\
    {sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
        {self.force_eager_fallback(func, schema, metadata, sig)}
        {self.metrics(func, schema)}
        {self.get_device(func, schema)}
        {self.lazy_tensor_decls(func, schema)}
        {self.build_ir_node(func, schema)}
        {self.return_aten_tensor(func, schema)}
    }}\n
    """
        ]
class ComputeShapeSignature:
    """
    Here we use the base name as the suffix of the signature to avoid generating for in-place variants.

    Produces both the C++ declaration (shape_decl) and call expression
    (shape_call) for a compute_shape_* function.
    """

    def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool):
        self.__schema = LazyIrSchema(f.func, symint=symint)
        # Parameter declarations, in dispatcher order.
        self.__dispatch_args = ", ".join(
            [a.decl() for a in dispatcher.arguments(f.func, symint=symint)]
        )
        # Call-site argument names (generator args included).
        self.__call_args = ", ".join(
            [f"{arg.name}" for arg in self.__schema.filtered_args(generator=True)]
        )
        self.__kernel_name = kernel_name

    def __decl_suffix(self) -> str:
        return f"{self.__kernel_name}({self.__dispatch_args})"

    def __call_suffix(self) -> str:
        return f"{self.__kernel_name}({self.__call_args})"

    @property
    def shape_decl(self) -> str:
        return f"TORCH_API std::vector<torch::lazy::Shape> compute_shape_{self.__decl_suffix()}"

    @property
    def shape_call(self) -> str:
        return f"torch::lazy::compute_shape_{self.__call_suffix()}"
  600. @dataclass(frozen=True)
  601. class GenLazyShapeInferenceDefinition:
  602. backend_index: BackendIndex
  603. tensor_class: str
  604. @method_with_native_function
  605. def __call__(self, f: NativeFunction) -> List[str]:
  606. sig = kernel_signature(f, self.backend_index)
  607. metadata = self.backend_index.get_kernel(f)
  608. assert metadata is not None
  609. # See Note [Generated LTC Shape Functions]
  610. is_view_copy_op = "view_copy" in f.tags
  611. is_structured = f.structured or f.structured_delegate is not None
  612. if is_structured or is_view_copy_op:
  613. return []
  614. else:
  615. shape_sig = ComputeShapeSignature(
  616. metadata.kernel, f, symint=metadata.supports_symint()
  617. )
  618. return ["\n".join([f"{shape_sig.shape_decl};"])]
  619. def generate_non_native_lazy_ir_nodes(
  620. non_native: List[Dict[str, Any]], gen_lazy_ir: GenLazyIR
  621. ) -> List[str]:
  622. """Generate the non-native lazy IR node classes"""
  623. nodes = []
  624. for op in non_native:
  625. # Set default properties for Non-Native IRs
  626. properties = LazyIrProperties("ShapeCache", "CanBeReused", "LowerDeclOnly")
  627. for p in op.get("properties", []):
  628. setattr(properties, p, True)
  629. # non-native is assumed to want symint bindings if you wrote symint
  630. schema = LazyIrSchema(FunctionSchema.parse(op["func"]), properties, symint=True)
  631. schema.opkind = op.get("opkind")
  632. nodes.append(gen_lazy_ir.gen(schema)[0])
  633. return nodes