from typing import Dict, List, NoReturn, Sequence, Union

from torchgen.api.types import (
    ArrayRefCType,
    BaseCType,
    Binding,
    boolT,
    ConstRefCType,
    deviceT,
    Expr,
    intArrayRefT,
    iOptTensorListRefT,
    layoutT,
    ListCType,
    longT,
    memoryFormatT,
    MutRefCType,
    NamedCType,
    opmath_t,
    OptionalCType,
    optionalIntArrayRefT,
    optionalScalarRefT,
    optionalSymIntArrayRefT,
    optionalTensorRefT,
    scalar_t,
    scalarT,
    scalarTypeT,
    SpecialArgName,
    symIntArrayRefT,
    SymIntT,
    tensorOptionsT,
    tensorT,
    VectorCType,
)

# This file implements a small program synthesis engine that performs
# conversions from one API to another.
#
# The key data type in this file is NamedCType, short for Named C++ semantic type.
# A NamedCType represents a C++ type, plus semantic information about what it
# represents.  For example, consider the argument "bool pin_memory"; its normal
# C++ type is "bool", but its C++ semantic type also keeps track that this
# represents a "pin_memory"; you can't just use a random other boolean in a
# context where you need a "pin_memory"!
#
# The translator takes a list of needed NamedCTypes, and then figures out how
# to construct expressions with these NamedCTypes from the given bindings.  Many
# of these expressions are trivial (I need a Tensor other; there's a Tensor
# other in scope); others are more nontrivial and may require packing/unpacking.
# Some examples of non-trivial actions:
#
#   - Need the "dtype" binding?  Well, maybe "dtype" isn't available
#     in the context; instead, "options" is, and you need to extract
#     it from there.  (Gather)
#
#   - Need the "context" binding?  Well, maybe "context" isn't available
#     in the context, and you need to construct it from "dtype", "device",
#     etc.  (Scatter)
#
#   - Need the "memory_format" binding?  Well, actually, it's available
#     from both "memory_format" and "options", so you had better make sure
#     they are consistent.  (Join)
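#
# For concreteness, an illustrative sketch of the C++ expressions the rules in
# solve() below produce for those three situations (the binding names here are
# assumptions for the example, not fixed by this module):
#
#   Gather:   dtype from options
#             -> optTypeMetaToScalarType(options.dtype_opt())
#   Scatter:  options from dtype/layout/device/pin_memory
#             -> TensorOptions().dtype(dtype).layout(layout)
#                    .device(device).pinned_memory(pin_memory)
#   Join:     memory_format reconciled with options
#             -> c10::impl::check_tensor_options_and_extract_memory_format(
#                    options, memory_format)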

options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))

out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT)))

longVec_ctype = VectorCType(BaseCType(longT))
longSymVec_ctype = VectorCType(BaseCType(SymIntT))
optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
optionalTensor_ctype = OptionalCType(BaseCType(tensorT))


class UnsatError(RuntimeError):
    pass


# Given a set of in-scope bindings and a set of target bindings, synthesize a
# list of expressions that uses only the in-scope bindings and has all of the
# types of the goals.  You may want to use this function if you're generating
# code for a function like:
#
#   void f({args}) {
#     g({exprs}); // g is a different API
#   }
#
# and you need to generate "exprs".
#
# Typically, a list of Bindings is convenient to get (you usually call something
# like arguments() to get them); but technically you need less information: for
# 'bindings' an (unordered) list of Exprs is sufficient; similarly, for 'goals',
# an (ordered) list of NamedCType goals is sufficient.  If you are doing
# something more complicated, e.g., tracking the set of bindings in a context,
# you may find using these smaller types more convenient.
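#
# A minimal usage sketch (illustrative only; the binding name "alpha" and its
# type are assumptions for the example, not taken from real codegen output).
# Given an in-scope "c10::optional<at::Scalar> alpha" binding, translate() can
# satisfy a "const c10::optional<at::Scalar>& alpha" goal, since a value
# trivially satisfies a const reference:
#
#   exprs = translate(
#       [Expr("alpha", NamedCType("alpha", OptionalCType(BaseCType(scalarT))))],
#       [NamedCType("alpha", ConstRefCType(OptionalCType(BaseCType(scalarT))))],
#   )
#   assert exprs[0].expr == "alpha"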
def translate(
    bindings: Sequence[Union[Expr, Binding]],
    goals: Sequence[Union[NamedCType, Binding]],
    *,
    method: bool = False,
    allow_expensive_conversions: bool = False,
) -> List[Expr]:
    binding_exprs: List[Expr] = []
    for b in bindings:
        if isinstance(b, Binding):
            binding_exprs.append(
                Expr(
                    expr=b.name,
                    type=b.nctype,
                )
            )
        else:
            binding_exprs.append(b)

    goal_ctypes: List[NamedCType] = []
    for g in goals:
        if isinstance(g, Binding):
            goal_ctypes.append(g.nctype)
        else:
            goal_ctypes.append(g)

    # Add all the bindings to the context
    ctx: Dict[NamedCType, str] = {}
    for b in binding_exprs:
        ctx[b.type] = b.expr
        # While we're at it, do some simple forward inference, looking through
        # constructors.  (An illustrative sketch of the derived entries this
        # produces follows right after this loop.)
        #
        # NB: When should you do forward inference versus backward inference?
        # The general idea:
        #
        #   - Backward inference WHEN the goal gets smaller
        #   - Forward inference WHEN the hypothesis gets smaller
        #
        # This helps ensure termination: backward inference starts with a goal
        # and tries to make it simpler and simpler until it's trivial; if the
        # goal could grow in size, we would blow up to a really huge goal size.
        # Similarly, with forward inference we take hypotheses and decompose
        # them into simpler hypotheses; if hypotheses could expand in size,
        # we also have potential nontermination.  (In the code below, forward
        # inference is only ever carried out at a single step, but you could
        # imagine repeated application of forward inference being profitable.)
        #
        # A good starting point in the literature for exploring more about proof
        # search is these lecture notes:
        # https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
        #
        # TODO: My kingdom for a pattern matcher
        # https://www.python.org/dev/peps/pep-0634/
        #
        # TODO: This could get us into recomputation trouble if b.expr is
        # nontrivial.  Fix this by implementing some sort of sharing so that if
        # multiple goals share the same expression, we only compute it once.
        # This seems to matter in practice, as the compiler is often unwilling
        # to CSE nontrivial expressions like scalar.to<scalar_t>().
        t = b.type
        if (
            isinstance(t, ConstRefCType)
            and isinstance(t.elem, OptionalCType)
            and isinstance(t.elem.elem, BaseCType)
            and str(t.elem.elem.type) == "at::Tensor"
        ):
            ctx[
                NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))
            ] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"

        if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
            ctx[
                NamedCType(t.name, BaseCType(optionalTensorRefT))
            ] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"

        if t.type == ConstRefCType(BaseCType(scalarT)):
            ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"

        if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
            ctx[
                NamedCType(t.name, BaseCType(optionalScalarRefT))
            ] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"

        if t.type == BaseCType(scalar_t):
            ctx[
                NamedCType(t.name, BaseCType(opmath_t))
            ] = f"static_cast<opmath_t>({b.expr})"

        # [Note: IOptTensorListRef]
        if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
            ctx[
                NamedCType(t.name, BaseCType(iOptTensorListRefT))
            ] = f"at::IOptTensorListRef({b.expr})"

    # Add implicit bindings if the generated code is inside a Tensor method
    if method:
        ctx[
            NamedCType("self", MutRefCType(BaseCType(tensorT)))
        ] = "const_cast<Tensor&>(*this)"
        ctx[
            NamedCType("self", ConstRefCType(BaseCType(tensorT)))
        ] = "const_cast<Tensor&>(*this)"
        # This is better!  Byte-for-byte compat
        # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"

    def unsat(goal: NamedCType) -> NoReturn:
        ctx_desc = "\n".join(
            f"  {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
        )
        raise UnsatError(
            f"""
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
When I failed, the following bindings were available in the context:

{ctx_desc}

This probably means there is a missing rule in the rules of torchgen.api.translate.
Check this module for more information.
"""
        )

    # A shitty backtracking search implementation.  It's shitty because it
    # does backtracking via stack (bad idea!) and for the most part tries to
    # avoid backtracking.  In particular, if direct=True, we won't try to do
    # any fancy synthesis, just trivial conversions (e.g., "T a" is OK for
    # "const T& a").  So all of the existing rules in this function simply try
    # to solve immediately, and bail if things don't work out.
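    #
    # An illustrative sketch of a non-direct solve (the binding name "sizes" is
    # an assumption for the example): if the goal is "at::IntArrayRef sizes" and
    # only a "c10::SymIntArrayRef sizes" binding is in scope, the IntArrayRef
    # rule below falls back to the SymIntArrayRef rule and synthesizes
    #
    #   C10_AS_INTARRAYREF_SLOW(sizes)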
    def solve(goal: NamedCType, *, direct: bool) -> str:
        def direct_solve(goal: NamedCType) -> str:
            return solve(goal, direct=True)

        if goal in ctx:
            # Trivial
            return ctx[goal]

        # const & is satisfied with mutable &
        if isinstance(goal.type, ConstRefCType):
            try:
                # WARNING: not strictly decreasing; be careful not to add a
                # direct conversion that satisfies mutable& with const&
                return solve(
                    NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
                )
            except UnsatError:
                pass

        # mutable & is satisfied with value
        if isinstance(goal.type, MutRefCType):
            try:
                return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
            except UnsatError:
                pass

        # TODO: These are referentially equal; we shouldn't have to do this.
        # Ensuring we don't use the type synonym IntArrayRef in codegen would
        # help.
        if goal.type == ArrayRefCType(BaseCType(longT)):
            return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct)

        if direct:
            unsat(goal)

        # For now, all of these rules are mutually exclusive.
        if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
            memory_format = direct_solve(
                NamedCType(
                    SpecialArgName.possibly_redundant_memory_format,
                    OptionalCType(BaseCType(memoryFormatT)),
                )
            )
            # No need to join "memory_format" and "options" if the target API takes "options" directly.
            # Otherwise it will cause the redundant memory_format error.
            if options_ctype in goal_ctypes:
                return memory_format
            try:
                options = direct_solve(options_ctype)
                return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
            except UnsatError:
                return memory_format
        elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
            dtype = direct_solve(
                NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
            )
            pin_memory = direct_solve(
                NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
            )
            device = direct_solve(
                NamedCType("device", OptionalCType(BaseCType(deviceT)))
            )
            layout = direct_solve(
                NamedCType("layout", OptionalCType(BaseCType(layoutT)))
            )
            return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"
        elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
            try:
                options = direct_solve(options_ctype)
                return f"optTypeMetaToScalarType({options}.dtype_opt())"
            except UnsatError:
                out_tensor = direct_solve(out_tensor_ctype)
                return f"{out_tensor}.scalar_type()"
        elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
            try:
                options = direct_solve(options_ctype)
                return f"{options}.layout_opt()"
            except UnsatError:
                out_tensor = direct_solve(out_tensor_ctype)
                return f"{out_tensor}.layout()"
        elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
            try:
                options = direct_solve(options_ctype)
                return f"{options}.device_opt()"
            except UnsatError:
                out_tensor = direct_solve(out_tensor_ctype)
                return f"{out_tensor}.device()"
        elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
            try:
                options = direct_solve(options_ctype)
                return f"{options}.pinned_memory_opt()"
            except UnsatError:
                # If we're calling a factory op from its out= variant,
                # we don't actually care about the value of pin_memory.
                out_tensor = direct_solve(out_tensor_ctype)
                return "c10::nullopt"
        # We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
        elif goal.type == BaseCType(intArrayRefT):
            try:
                return direct_solve(NamedCType(goal.name, longVec_ctype))
            except UnsatError:
                # We can also go SymIntArrayRef -> IntArrayRef
                symIntArrayRef_type = direct_solve(
                    NamedCType(goal.name, BaseCType(symIntArrayRefT))
                )
                return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})"
        elif goal.type == BaseCType(symIntArrayRefT):
            try:
                r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT)))
                return f"c10::fromIntArrayRefSlow({r})"
            except UnsatError:
                return direct_solve(NamedCType(goal.name, longSymVec_ctype))
        elif goal.type == BaseCType(SymIntT):
            return direct_solve(NamedCType(goal.name, BaseCType(longT)))
        elif goal.type == OptionalCType(BaseCType(SymIntT)):
            argname = direct_solve(
                NamedCType(goal.name, OptionalCType(BaseCType(longT)))
            )
            return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt"
        elif goal.type == BaseCType(longT):
            symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
            return f"{symInt_type}.expect_int()"
        elif goal.type == OptionalCType(BaseCType(longT)):
            argname = direct_solve(
                NamedCType(goal.name, OptionalCType(BaseCType(SymIntT)))
            )
            return f"{argname}.has_value() ? c10::make_optional({argname}->expect_int()) : c10::nullopt"
        elif goal.type == BaseCType(optionalIntArrayRefT):
            try:
                return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
            except UnsatError:
                argname = direct_solve(
                    NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT))
                )
                return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt"
        elif goal.type == BaseCType(optionalSymIntArrayRefT):
            # TODO: You might also want to solve this from longSymVec_ctype or
            # an optional version of it
            argname = direct_solve(
                NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
            )
            return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt"
        elif goal.type == BaseCType(optionalScalarRefT):
            return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
        elif goal.type == BaseCType(optionalTensorRefT):
            return direct_solve(NamedCType(goal.name, optionalTensor_ctype))

        # Note [translation from C++ reference to value types]
        # The below cases are all for when we have an argument with a reference type,
        # and a corresponding goal with a value type.
        # These are needed when we populate the inputs to a lambda capture and we need
        # to guarantee the lifetime of each captured argument.
        # We guard it with an explicit kwarg because converting to a value type is
        # expensive (e.g., O(n) to convert an IntArrayRef into a vector<int>),
        # so the caller of translate() should be explicit that they need it.
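        #
        # Illustrative sketch (the binding name "sizes" is an assumption for the
        # example): with allow_expensive_conversions=True, an "at::IntArrayRef sizes"
        # binding can satisfy a "std::vector<int64_t> sizes" goal via
        #
        #   sizes.vec()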
        if allow_expensive_conversions:
            if goal.type == VectorCType(BaseCType(longT)):
                intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
                argname = direct_solve(intArrayRef_ctype)
                return f"{argname}.vec()"
            if goal.type == VectorCType(BaseCType(SymIntT)):
                symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
                argname = direct_solve(symIntArrayRef_ctype)
                return f"{argname}.vec()"
            elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
                optionalIntArrayRef_ctype = NamedCType(
                    goal.name, BaseCType(optionalIntArrayRefT)
                )
                argname = direct_solve(optionalIntArrayRef_ctype)
                return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt"
            elif goal.type == OptionalCType(BaseCType(scalarT)):
                optionalScalarRef_ctype = NamedCType(
                    goal.name, BaseCType(optionalScalarRefT)
                )
                argname = direct_solve(optionalScalarRef_ctype)
                return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
            elif goal.type == OptionalCType(BaseCType(scalarT)):
                optionalTensorRef_ctype = NamedCType(
                    goal.name, BaseCType(optionalTensorRefT)
                )
                argname = direct_solve(optionalTensorRef_ctype)
                return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
            # Technically, we also need to handle cases of C++ containers holding reference types.
            # But there currently aren't any ops that require lambda capture codegen
            # with arguments like std::vector<IntArrayRef>.
            # If that changes, we'll have to add the translation here.

        # We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
        # We could probably generalize this to non-tensor types too.
        if goal.type == MutRefCType(BaseCType(tensorT)):
            const_ref_tensor_ctype = NamedCType(
                goal.name, ConstRefCType(BaseCType(tensorT))
            )
            argname = direct_solve(const_ref_tensor_ctype)
            return f"const_cast<Tensor&>({argname})"

        unsat(goal)

    return [Expr(solve(g, direct=False), g) for g in goal_ctypes]