// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <bitset>

#include <ATen/functorch/Macros.h>
#include <ATen/functorch/BatchedTensorImpl.h>

namespace at {
namespace functorch {
// This file contains the legacy (now-deprecated) batching rule API.
// Please try to use the new-style batching rule API instead
// (see writing_batch_rules.md).
//
// This file contains abstractions used for transforming *logical* vmap
// arguments into *physical* arguments. (Keep reading for definitions of
// these terms.)
// NOTE: [Logical vs physical args]
//
// Consider the following vmap.
//   vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
//
// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
// with batch dims 0 and 2:
//   BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
//
// We say the *logical* view of the tensor has size [3] -- tensors inside
// `func` appear to have size [3].
// However, the *physical* underlying tensor (the one passed to vmap) has size
// [2, 3, 4].
//
// This notion of logical vs physical also extends to non-tensor arguments.
// Consider the previous tensor; let's assume the user called
// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
// dimension they are reducing over is dim 0, but the physical dim is dim 1
// (the first non-batch dimension).
// Forward declared; see NOTE: [What is a VmapPhysicalView?]
struct VmapPhysicalView;

// Most PyTorch operators take 4 or fewer inputs.
constexpr int64_t kVmapTransformStaticInputSize = 4;
using VmapPhysicalViewVec =
    SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;

// PyTorch generally advertises good performance for <= 5 dims
// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
// dimensions to get 8. Adjust this number as necessary.
constexpr int64_t kVmapStaticDimVecSize = 8;
using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
// NOTE: [What is a VmapTransform?]
//
// A *VmapTransform* converts logical views of tensors to physical views.
//
// Batching rules use VmapTransforms to convert logical arguments to
// physical arguments, then call one or more at:: operators that handle the
// physical arguments, and then convert the physical result back to a logical
// argument. (A sketch of this pattern appears below MultiBatchVmapTransform.)
// VmapTransform for operators that take tensors with multiple batch dims.
// Given one or more logical views on Tensors, `logicalToPhysical`
// permutes all of the batch dims to the front of the tensor, aligns
// and expands the batch dims to match each other (according to their `level`),
// and returns a VmapPhysicalView on the tensor(s).
struct TORCH_API MultiBatchVmapTransform {
  static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
  static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
};
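
// For illustration only -- a hypothetical sketch, not an operator defined in
// this file (`sin_batching_rule` is an assumed name): a legacy batching rule
// for a unary op could use this transform like so:
//
//   Tensor sin_batching_rule(const Tensor& self) {
//     // Move all batch dims of `self` to the front.
//     auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
//     // Call the underlying at:: operator on the physical tensor.
//     auto result = at::sin(self_physical.tensor());
//     // Wrap the physical result back up into a logical BatchedTensor.
//     return self_physical.getPhysicalToLogicalMap().apply(result);
//   }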
// VmapTransform for operators that broadcast all inputs.
// Given some logical views on Tensors, `logicalToPhysical`:
// - permutes all of the batch dims to the front of the tensors
// - aligns all the batch dims to the collective levels of all of the tensors.
//   If a tensor does not have a batch dim for a vmap level, then it receives
//   a size-one dimension for said level.
// - aligns the non-batch dims to have the same dimensionality, adding extra
//   size-1 dimensions in between the batch dimensions and the non-batch
//   dimensions so that the batch dimensions are lined up from the right.
//
// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap
// tensors of size (B, 1, 2) and (B, 3, 2).
//
// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
// actually *need* to return a tensor of size (1, 2) for the second tensor
// because the broadcasting operation takes care of that for us, but we do
// it anyway to keep things simple.
struct TORCH_API BroadcastingVmapTransform {
  static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
};
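
// For illustration only -- a hypothetical sketch (`add_batching_rule` is an
// assumed name): a legacy batching rule for a broadcasting binary op could
// use this transform like so:
//
//   Tensor add_batching_rule(const Tensor& self, const Tensor& other) {
//     // Broadcast-align the batch (and non-batch) dims of both inputs.
//     auto physical_args =
//         BroadcastingVmapTransform::logicalToPhysical({self, other});
//     auto result =
//         at::add(physical_args[0].tensor(), physical_args[1].tensor());
//     return physical_args[0].getPhysicalToLogicalMap().apply(result);
//   }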
// Forward declared; if you're reading this file head to toe, don't worry about
// it yet.
struct VmapPhysicalToLogicalMap;

// NOTE: [What is a VmapPhysicalView?]
//
// VmapPhysicalView represents a physical view on a Tensor.
//
// One can use it to further convert logical dimension indices, logical shapes,
// and more to their physical variants, or convert a new (physical) tensor into
// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
//
// VmapPhysicalView stores a physical tensor with all of its batch dimensions at
// the front and some levels that correspond to said batch dimensions.
//
// The levels bitset specifies which vmap levels correspond to the batch
// dimensions at the front of the tensor. In particular, the number of set bits
// corresponds to the number of batch dimensions on `tensor`, and the rightmost
// set bit of `levels` specifies the maximum number of nested vmaps we are in at
// this point in time.
//
// For example, given:
//   physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
//
// The rightmost set bit of `levels` is at position 3, indicating that the
// number of nested vmaps is less than or equal to 3.
//   bitset: 010100
//              ^
//              |
//   levels: 012345
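//
// A sketch of how such a bitset could be constructed with standard
// std::bitset operations (for illustration only):
//
//   std::bitset<kVmapNumLevels> levels;
//   levels.set(1);  // vmap level 1 has a batch dim on `tensor`
//   levels.set(3);  // vmap level 3 has a batch dim on `tensor`
//   // levels.count() == 2, matching the two batch dims at the front.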
struct TORCH_API VmapPhysicalView {
  VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
      : levels_(levels), tensor_(std::move(tensor)) {
    // TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor));
  }

  Tensor& tensor() { return tensor_; }
  const Tensor& tensor() const { return tensor_; }

  // Maps logical dim indices to physical dim indices. Also does dim wrapping.
  //
  // For example, given:
  //   physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
  //
  // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
  // This is because the number of set bits in `levels` tells us that the first
  // two dimensions of `tensor_` are batch dimensions, so a logical dim of `n`
  // is actually a physical dim of `n + 2`.
  VmapDimVector getPhysicalDims(IntArrayRef logical_dims) const;
  int64_t getPhysicalDim(int64_t logical_dim) const;
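
  // Continuing the example above (for illustration only): because dim
  // wrapping is applied, negative logical dims behave like negative dims on
  // the logical (2-dimensional) view:
  //
  //   physical_view.getPhysicalDim(-1);        // returns 3 (wraps to logical dim 1)
  //   physical_view.getPhysicalDims({0, -1});  // returns {2, 3}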
  // Returns a VmapPhysicalToLogicalMap object. This can be used for
  // mapping a physical tensor to a new logical tensor (BatchedTensor).
  VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;

  // Maps a logical shape to a physical shape by prepending the batch
  // sizes to the logical shape.
  VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
  SymDimVector getPhysicalShape(c10::SymIntArrayRef logical_shape) const;
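
  // For example (a sketch): if `tensor_` has two batch dims with sizes 2 and
  // 3 at the front, then
  //
  //   physical_view.getPhysicalShape({5, 7});  // returns {2, 3, 5, 7}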
  int64_t numBatchDims() const;

 private:
  int64_t numLogicalDims() const;

  std::bitset<kVmapNumLevels> levels_;
  Tensor tensor_;
};
// Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
// to a logical one (BatchedTensor). It holds some levels that are used to do
// the mapping and assumes that the batch dimensions in the physical tensor all
// occur at the front of the tensor.
struct TORCH_API VmapPhysicalToLogicalMap {
  VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels) : levels_(levels) {}

  // Maps a physical tensor to a new logical tensor (BatchedTensor).
  // Assumes that all of the "batch dimensions" are at the front
  // of the physical tensor. For example, given:
  // - x = rank-4 Tensor with size 2, 3, 5, 7
  // - levels = (2, 4)
  // Returns:
  // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1,lvl=4)])
  Tensor apply(const Tensor& physical_tensor) const;

  // Given a vector of physical tensors,
  // 1. maps each tensor to a new logical tensor. Assumes that all of the
  //    "batch dimensions" are at the front of the physical tensors.
  // 2. stores the new logical tensors back into the passed-in vector. This is
  //    to avoid additional dynamic allocations.
  void applyInplace(std::vector<Tensor>& physical_tensors) const;

  std::bitset<kVmapNumLevels> levels_;
};
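
// For illustration only -- a hypothetical sketch: given a VmapPhysicalView
// `physical` obtained from one of the transforms above, a batching rule that
// produces multiple outputs could map them all back to logical tensors in one
// go:
//
//   std::vector<Tensor> results =
//       at::split(physical.tensor(), /*split_size=*/2, /*dim=*/-1);
//   physical.getPhysicalToLogicalMap().applyInplace(results);
//   // `results` now holds logical BatchedTensors.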
} // namespace functorch
} // namespace at