// LinearAlgebraUtils.h

#pragma once
#include <c10/core/ScalarType.h>
#include <c10/util/irange.h>
#include <c10/util/Exception.h>
#include <c10/util/strides.h>
#include <ATen/core/Tensor.h>
#include <ATen/ExpandUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/TransposeType.h>
#include <limits>
#include <type_traits>
#include <sstream>
#include <cstring>
#include <cctype>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/zeros.h>
#endif
namespace at { namespace native {

static inline c10::MaybeOwned<Tensor> expect_resolved_conj(const Tensor& tensor) {
  if (tensor.is_conj()) {
    return c10::MaybeOwned<Tensor>::owned(tensor.resolve_conj());
  } else {
    return c10::MaybeOwned<Tensor>::borrowed(tensor);
  }
}
static inline DimVector batched_matrix_contiguous_strides(
    const IntArrayRef sizes,
    const bool f_contig = false) {
  // f_contig chooses between the strides of a batch of Fortran (F-contiguous)
  // and C-contiguous matrices
  auto strides = c10::contiguous_strides(sizes);
  auto dim = strides.size();

  if (f_contig && dim >= 2) {
    // Fix the strides of the last two dimensions, so that we return
    // C-contiguous batches of F-contiguous matrices.
    strides[dim - 1] = std::max(sizes[dim - 2], static_cast<int64_t>(1));
    strides[dim - 2] = 1;
  }
  return strides;
}
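// Illustrative sketch (assumed example, not from the original header): for sizes
// {2, 3, 5} this returns {15, 5, 1} with f_contig == false (plain C-contiguous
// layout) and {15, 1, 3} with f_contig == true (C-contiguous batches of
// column-major 3x5 matrices):
//   auto c_strides = batched_matrix_contiguous_strides({2, 3, 5});                    // {15, 5, 1}
//   auto f_strides = batched_matrix_contiguous_strides({2, 3, 5}, /*f_contig=*/true); // {15, 1, 3}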
/*
 * Clones a Tensor so that the following conditions hold:
 * If we think of a Tensor of having size (B, M, N), where B is any number
 * of batch dimensions, then:
 * - Each (M, N) matrix is in column major form
 * - Let Tensor P have size (B, M, N) and Q have size (B, M', N').
 *   Then when laid out in memory, the M by N matrix starting at
 *   P.data_ptr()[B * M * N] is of the same corresponding batch as the M' by N'
 *   matrix starting at Q.data_ptr()[B * M' * N'].
 */
static inline Tensor cloneBatchedColumnMajor(const Tensor& src) {
  // If src is already in batched column major format, then
  // this will be efficient (no reordering of the data will occur)
  // because the first transpose will make the tensor contiguous,
  // and cloning a contiguous tensor is fast.
  auto result = src.mT().clone(at::MemoryFormat::Contiguous);
  result.transpose_(-2, -1);
  return result;
}
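// Illustrative sketch (assumed example, not from the original header):
//   auto src = at::rand({2, 3, 4});            // strides {12, 4, 1} (row-major)
//   auto dst = cloneBatchedColumnMajor(src);   // same sizes {2, 3, 4}
//   // dst.strides() == {12, 1, 3}: each 3x4 matrix is column-major,
//   // while the batch dimension stays C-contiguous.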
/*
 * contig chooses between C-contig (true) and F-contig (false)
 */
static inline c10::MaybeOwned<Tensor> borrow_else_clone(const bool cond, const Tensor& borrow, const Tensor& clone, const bool contig) {
  return cond ? c10::MaybeOwned<Tensor>::borrowed(borrow)
              : c10::MaybeOwned<Tensor>::owned(contig ? clone.clone(MemoryFormat::Contiguous)
                                                      : cloneBatchedColumnMajor(clone));
}
/*
 * This method is designed to be a faster alternative to
 * `cloneBatchedColumnMajor` with some additional features,
 * namely:
 * 1. It uses `copy` instead of `clone`, which could be much faster.
 * 2. The `nrows` parameter allows creating copies with more rows
 *    than the original input, which is required for some LAPACK/MAGMA methods.
 * 3. `desired_batch_sizes` is used to create copies with the batch size
 *    which is either the original batch size of the input, or its larger
 *    broadcasted shape.
 */
static inline Tensor copyBatchedColumnMajor(const Tensor& src, int64_t nrows = -1,
    at::OptionalIntArrayRef desired_batch_sizes = c10::nullopt) {
  nrows = (nrows == -1) ? src.size(-2) : nrows;
  auto copy_sizes = desired_batch_sizes.has_value()
      ? desired_batch_sizes.value().vec()
      : IntArrayRef(src.sizes().data(), src.dim() - 2).vec();
  copy_sizes.insert(copy_sizes.end(), {nrows, src.size(-1)});
  const auto copy_strides = batched_matrix_contiguous_strides(copy_sizes, /*f-contig*/true);
  auto copy = at::empty_strided(copy_sizes, copy_strides, src.options());
  copy.narrow(-2, 0, src.size(-2)).copy_(src);
  return copy;
}
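// Illustrative sketch (assumed example, not from the original header):
//   auto src = at::rand({2, 3, 4});
//   auto padded = copyBatchedColumnMajor(src, /*nrows=*/5);
//   // padded.sizes() == {2, 5, 4}; rows 0..2 of each matrix hold a copy of
//   // src in column-major layout, rows 3..4 are left uninitialized padding.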
/*
 * Given batches of matrices with arbitrary batch dim,
 * computes the number of batches.
 */
static inline int64_t batchCount(const Tensor& batched_matrices) {
  int64_t result = 1;
  for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
    result *= batched_matrices.size(i);
  }
  return result;
}

// Computes the number of elements of a matrix in a batched matrix tensor
static inline int64_t matrixStride(const Tensor& batched_matrices) {
  return batched_matrices.size(-1) * batched_matrices.size(-2);
}
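// Illustrative sketch (assumed example, not from the original header): for a
// tensor of shape (2, 3, 4, 5), batchCount returns 2 * 3 == 6 and
// matrixStride returns 4 * 5 == 20 (elements per matrix).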
// Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig, eig)
static inline void checkIsMatrix(const Tensor& A, const char* const f_name, const char* const arg_name = "A") {
  TORCH_CHECK(A.dim() >= 2, f_name, ": The input tensor ", arg_name, " must have at least 2 dimensions.");
}

static inline void squareCheckInputs(const Tensor& self, const char* const f_name, const char* const arg_name = "A") {
  checkIsMatrix(self, f_name, arg_name);
  TORCH_CHECK(self.size(-1) == self.size(-2),
              f_name,
              ": ", arg_name, " must be batches of square matrices, "
              "but they are ", self.size(-2), " by ", self.size(-1), " matrices");
}

static inline void checkInputsSolver(const Tensor& A,
                                     const Tensor& B,
                                     const bool left,
                                     const char* const f_name) {
  squareCheckInputs(A, f_name, "A");
  checkIsMatrix(B, f_name, "B");
  TORCH_CHECK(left ? A.size(-2) == B.size(-2) : A.size(-1) == B.size(-1),
              f_name, ": Incompatible shapes of A and B for the equation ",
              left ? "AX = B" : "XA = B",
              " (", A.size(-2), "x", A.size(-1), " and ", B.size(-2), "x", B.size(-1), ")");
}
static inline bool is_row_or_column_contiguous(const Tensor& t) {
  // This could be made more general, similar to how it's checked in matmul, which would allow us to
  // elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky.
  // We choose to be conservative for simplicity
  return t.is_contiguous() || t.transpose(-2, -1).is_contiguous();
}

static inline TransposeType to_transpose_type(const bool contig, const bool conj) {
  if (conj) {
    if (contig) { TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); }
    else {        return TransposeType::ConjTranspose; }
  } else {
    if (contig) { return TransposeType::NoTranspose; }
    else {        return TransposeType::Transpose; }
  }
}
// This function is designed to be used with linear algebra methods that minimize
// L(ax - b) = 0, where L is generally the identity map (`solve`, for example)
// or the L2 norm (`lstsq`).
// It is expected that `a` and `b` are contiguous tensors of column-major matrices
// (so that a.view({-1, a.size(-2), a.size(-1)}) succeeds, same for `b`),
// with the following additional properties:
//
// 1. a.dim() == b.dim()
// 2. a.shape[:-2] broadcasts over b.shape[:-2]
// 3. a.size(i) <= b.size(i) for i=0,..., a.dim() - 3 (only for batch dimensions)
//
// MAGMA/LAPACK modify tensor `a` in-place, and the main goal of this method
// is to be memory efficient, which means that if there exists an index i such that
// a.shape[i] < b.shape[i], 0 <= i <= a.dim() - 3,
// then instead of materializing copies of `a` in the broadcasted shape, we keep
// a buffer copy of `a` along with flags that check whether specific batch dimension
// indices for `a` were already accessed. If they were, we copy the data from the buffer
// into `a`. The number of copies does not exceed
// prod(max(a.shape[:-2], b.shape[:-2]) - a.shape[:-2] + 1)
// and this value is attained by tensors with non-empty batch dimensions.
//
// func_t `f` is a callable that is being supplied with
// scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx.
// a_working_ptr and b_working_ptr can directly be passed to LAPACK/MAGMA routines,
// and a_linear_batch_idx is an index in the 3d representation which corresponds to
// the memory a_working_ptr points to, in other words:
// a_working_ptr == a.view({-1, a.size(-2), a.size(-1)}).select(0, a_linear_batch_idx).data_ptr<scalar_t>();
// a_linear_batch_idx is useful to store metadata related to `a`, such as, for example,
// its rank or singular values (see linalg_lstsq).
template<typename scalar_t, typename func_t>
void batch_iterator_with_broadcasting(const Tensor& a, const Tensor& b, const func_t& f) {
  IntArrayRef a_batch_sizes(a.sizes().data(), a.dim() - 2);
  IntArrayRef b_batch_sizes(b.sizes().data(), b.dim() - 2);

  auto a_linear_batch_idx = at::arange(batchCount(a)).view(a_batch_sizes);
  auto b_linear_batch_idx = at::arange(batchCount(b)).view(b_batch_sizes);

  TensorIterator iter = TensorIteratorConfig()
    .set_check_mem_overlap(false)
    .check_all_same_dtype(false)
    .resize_outputs(false)
    .add_output(b_linear_batch_idx)
    .add_input(a_linear_batch_idx)
    .build();

  auto m = a.size(-2);
  auto n = a.size(-1);
  auto a_3d = a.view({batchCount(a), m, n});
  auto b_3d = b.view({batchCount(b), b.size(-2), b.size(-1)});

  auto a_broadcasts_over_b = (a_batch_sizes != b_batch_sizes);
  Tensor a_buffer, a_was_accessed, a_buffer_3d;
  std::function<void(int64_t)> check_if_copy_needed_for_a
    = [](int64_t /*a_curr_linear_batch_idx*/){};
  if (a_broadcasts_over_b) {
    a_buffer = at::empty_strided(a.sizes(), a.strides(), a.options())
      .copy_(a);
    a_was_accessed = at::zeros(batchCount(a), at::kBool);
    a_buffer_3d = a_buffer.view({batchCount(a), m, n});
    check_if_copy_needed_for_a = [&](int64_t a_curr_linear_batch_idx) {
      auto* a_was_accessed_flag = a_was_accessed
        .select(0, a_curr_linear_batch_idx)
        .data_ptr<bool>();
      if (!(*a_was_accessed_flag)) {
        *a_was_accessed_flag = true;
      }
      else {
        a_3d.select(0, a_curr_linear_batch_idx)
          .copy_(a_buffer_3d.select(0, a_curr_linear_batch_idx));
      }
    };
  }

  auto loop = [&](char** data, const int64_t* strides, int64_t nelems) {
    auto* b_batch_idx_ptr = data[0];
    auto* a_batch_idx_ptr = data[1];

    for (const auto elem C10_UNUSED : c10::irange(nelems)) {
      auto b_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(b_batch_idx_ptr);
      auto a_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(a_batch_idx_ptr);

      check_if_copy_needed_for_a(a_curr_linear_batch_idx);

      auto* a_working_ptr = a_3d.select(0, a_curr_linear_batch_idx)
        .data_ptr<scalar_t>();
      auto* b_working_ptr = b_3d.select(0, b_curr_linear_batch_idx)
        .data_ptr<scalar_t>();
      f(a_working_ptr, b_working_ptr, a_curr_linear_batch_idx);

      b_batch_idx_ptr += strides[0];
      a_batch_idx_ptr += strides[1];
    }
  };
  iter.serial_for_each(loop, {0, batchCount(b)});
}
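// Illustrative usage sketch (hypothetical, not from the original header):
// calling a per-batch routine on broadcast-compatible double tensors `a` and
// `b`, where `fake_lapack_solve` is a placeholder for an actual LAPACK/MAGMA
// wrapper and `infos` collects per-batch metadata keyed by a_linear_batch_idx:
//   std::vector<int64_t> infos(batchCount(a), 0);
//   batch_iterator_with_broadcasting<double>(a, b,
//     [&](double* a_working_ptr, double* b_working_ptr, int64_t a_linear_batch_idx) {
//       infos[a_linear_batch_idx] = fake_lapack_solve(a_working_ptr, b_working_ptr);
//     });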
// Returns the epsilon value for floating types except half
static inline double _get_epsilon(const ScalarType& sc_type) {
  switch (sc_type) {
    case at::ScalarType::Float:
      return static_cast<double>(std::numeric_limits<float>::epsilon());
    case at::ScalarType::Double:
      return std::numeric_limits<double>::epsilon();
    default:
      AT_ERROR("This function doesn't handle types other than float and double");
  }
}
// Validates input shapes and devices
// for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
static inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) {
  TORCH_CHECK(self.device() == A.device(),
              "Expected b and A to be on the same device, but found b on ",
              self.device(), " and A on ", A.device(), " instead.");

  TORCH_CHECK(self.scalar_type() == A.scalar_type(),
              "Expected b and A to have the same dtype, but found b of type ",
              self.scalar_type(), " and A of type ", A.scalar_type(), " instead.");

  TORCH_CHECK(A.size(-1) == A.size(-2),
              "A must be batches of square matrices, "
              "but they are ", A.size(-2), " by ", A.size(-1), " matrices");

  TORCH_CHECK(A.size(-1) == self.size(-2),
              "Incompatible matrix sizes for ", name, ": each A "
              "matrix is ", A.size(-1), " by ", A.size(-1),
              " but each b matrix is ", self.size(-2), " by ", self.size(-1));
}

static inline void checkFloatingOrComplex(const Tensor& t, const char* const f_name, const bool allow_low_precision_dtypes=true) {
  auto dtype = t.scalar_type();
  TORCH_CHECK((at::isFloatingType(dtype) || at::isComplexType(dtype)),
              f_name, ": Expected a floating point or complex tensor as input. Got ", dtype);
  if (!allow_low_precision_dtypes) {
    TORCH_CHECK(dtype == kFloat || dtype == kDouble || dtype == kComplexFloat || dtype == kComplexDouble,
                f_name, ": Low precision dtypes not supported. Got ", dtype);
  }
}
// Checks if all the Tensors in a TensorList are of the same dimensions
static inline void checkAllSameDim(TensorList tensors, int64_t dim) {
  for (auto &t : tensors) {
    TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead.");
  }
}

static inline std::tuple<std::vector<int64_t>, std::vector<int64_t>> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2) {
  // broadcast the batch dimensions of arg1 and arg2.
  IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
  IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
  std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);

  std::vector<int64_t> arg1_expand_size({expand_batch_portion});
  arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) });

  std::vector<int64_t> arg2_expand_size({expand_batch_portion});
  arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) });

  return std::make_tuple(std::move(arg1_expand_size), std::move(arg2_expand_size));
}

static inline std::tuple<Tensor,Tensor> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) {
  // If there's no name we assume we don't want to check the errors
  if (name != nullptr) {
    linearSolveCheckInputs(arg1, arg2, name);
  }

  std::vector<int64_t> arg1_expand_size, arg2_expand_size;
  std::tie(arg1_expand_size, arg2_expand_size) = at::native::_linalg_broadcast_batch_dims(arg1, arg2);

  auto arg1_broadcasted = arg1_expand_size == arg1.sizes() ? arg1 : arg1.expand(arg1_expand_size);
  auto arg2_broadcasted = arg2_expand_size == arg2.sizes() ? arg2 : arg2.expand(arg2_expand_size);
  return std::make_tuple(arg1_broadcasted, arg2_broadcasted);
}
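// Illustrative sketch (assumed example, not from the original header): with
//   arg1.sizes() == {2, 1, 4, 4}   (batch dims {2, 1})
//   arg2.sizes() == {3, 4, 2}      (batch dims {3})
// the broadcasted batch shape is {2, 3}, so the returned expand sizes are
// {2, 3, 4, 4} for arg1 and {2, 3, 4, 2} for arg2.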
static inline std::vector<int64_t> broadcast_batch_size(const Tensor& t1, const Tensor& t2, int64_t n_batch_dims) {
  IntArrayRef t1_batch_sizes(t1.sizes().data(), n_batch_dims);
  IntArrayRef t2_batch_sizes(t2.sizes().data(), n_batch_dims);
  auto broadcasted_batch_sizes = infer_size(t1_batch_sizes, t2_batch_sizes);
  return broadcasted_batch_sizes;
}

// Return a permutation with the given axes moved to the end.
static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
  const std::vector<int64_t> a = axes.vec();
  const int64_t ndim = self.ndimension();
  std::vector<int64_t> perm;

  for (const auto i : c10::irange(ndim)) {
    auto it = std::find(a.begin(), a.end(), i);
    if (it == a.end()) {
      perm.push_back(i);
    }
  }
  for (auto i : a) {
    perm.push_back(i);
  }

  TORCH_CHECK((int64_t)perm.size() == ndim,
    "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);

  return self.permute(perm);
}
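// Illustrative sketch (assumed example, not from the original header): for a
// tensor of shape (2, 3, 4, 5), _move_to_end(self, {0, 2}) builds the
// permutation {1, 3, 0, 2}, so the result has shape (3, 5, 2, 4).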
// parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced)
static inline std::tuple<bool, bool> _parse_qr_mode(c10::string_view mode) {
  bool compute_q;
  bool reduced;
  if (mode == "reduced") {
    compute_q = true;
    reduced = true;
  } else if (mode == "complete") {
    compute_q = true;
    reduced = false;
  } else if (mode == "r") {
    compute_q = false;
    reduced = true; // this is actually irrelevant in this mode
  } else {
    TORCH_CHECK(false, "qr received unrecognized mode '", mode,
                "' but expected one of 'reduced' (default), 'r', or 'complete'");
  }
  return std::make_tuple(compute_q, reduced);
}
// Function to compute sizes, strides and the extra columns for the Q matrix in the QR Decomposition
static inline std::tuple<DimVector, DimVector, int64_t> _compute_geometry_for_Q(
    const Tensor& input,
    bool reduced) {
  int64_t m = input.size(-2), n = input.size(-1);
  int64_t n_columns_q;

  // We need to compute the required size of Q based on the `reduced` option
  DimVector q_sizes(input.sizes());
  if (!reduced && m > n) {
    q_sizes[input.dim() - 1] = m;
    n_columns_q = m;
  } else {
    q_sizes[input.dim() - 1] = n;
    n_columns_q = std::min(m, n);
  }
  auto q_strides = batched_matrix_contiguous_strides(q_sizes, /*f-contig*/true);
  return std::make_tuple(q_sizes, q_strides, n_columns_q);
}
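// Illustrative sketch (assumed example, not from the original header): for an
// input of shape (..., 5, 3):
//   reduced == true  -> q_sizes ends in (5, 3), n_columns_q == min(5, 3) == 3
//   reduced == false -> q_sizes ends in (5, 5), n_columns_q == 5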
static inline bool svd_uses_cusolver(const Tensor& A) {
  // if cusolver is available, it is used unconditionally
  return A.is_cuda()
    && at::globalContext().hasCuSOLVER()
    && at::globalContext().linalgPreferredBackend() != at::LinalgBackend::Magma;
}

// Function used instead of .to so that the original strides are retained
// (.to doesn't retain strides and makes the output tensor contiguous)
static inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) {
  auto strided_to = at::empty_strided(original_tensor.sizes(),
                                      original_tensor.strides(),
                                      options);
  strided_to.copy_(original_tensor);
  return strided_to;
}
// Creates a dimension permutation array that can be given to `at::permute()`, which will shift
// the two specified dimensions to the end of a tensor, without changing the order of
// the other dimensions. `dim1` will be placed at the very end, and `dim0` will be
// placed just to the left of it.
//
// For instance, given a 4-D tensor, dimensions 1 and 3 can be shifted to the end by
// calling `create_dim_backshift_permutation(1, 3, 4)`. The resulting vector will
// be `vec(0, 2, 1, 3)`.
static inline std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) {
  TORCH_CHECK(
    (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0),
    "duplicate or invalid dimensions");
  std::vector<int64_t> permutation(ndim);
  int64_t cur_permuted_dim = 0;
  for (const auto dim_ind : c10::irange(ndim)) {
    if ((dim_ind != dim0) && (dim_ind != dim1)) {
      permutation[cur_permuted_dim++] = dim_ind;
    }
  }
  permutation[cur_permuted_dim++] = dim0;
  permutation[cur_permuted_dim] = dim1;
  return permutation;
}

// Creates a dimension permutation array that can be given to `at::permute()`, which
// will reverse a given permutation.
// The reverse permutation array is created by swapping the indices and their
// associated values from the given permutation array.
static inline std::vector<int64_t> create_reverse_permutation(std::vector<int64_t> permutation) {
  int64_t ndim = permutation.size();
  std::vector<int64_t> reverse_permutation(ndim);
  for (const auto dim_ind : c10::irange(ndim)) {
    reverse_permutation[permutation[dim_ind]] = dim_ind;
  }
  return reverse_permutation;
}
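// Illustrative sketch (assumed example, not from the original header): the two
// helpers compose to a round trip. With ndim == 5:
//   auto perm = create_dim_backshift_permutation(1, 3, 5);  // {0, 2, 4, 1, 3}
//   auto inv  = create_reverse_permutation(perm);           // {0, 3, 1, 4, 2}
//   // t.permute(perm).permute(inv) restores the original dimension order of t.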
// Compute R-work array size for MAGMA/LAPACK cgesdd/zgesdd
// See https://github.com/Reference-LAPACK/lapack/blob/122506cd8b6ce050a200920c3d4c0b153b150fd8/SRC/cgesdd.f#L186
static inline int64_t computeLRWorkDim(const char jobz, int64_t m, int64_t n) {
  auto mn = std::min(m, n);
  auto mx = std::max(m, n);
  if (jobz == 'N') {
#ifdef __APPLE__
    // According to `vecLib.framework/Headers/clapack.h` Accelerate.framework is based on LAPACK 3.2.1
    return 7 * mn;
#else
    // This setting is valid for LAPACK 3.6+
    return 5 * mn;
#endif
  }
  if (mx > 10 * mn) {
    return 5 * mn * mn + 5 * mn;
  }
  return std::max(5 * mn * mn + 5 * mn, 2 * mx * mn + 2 * mn * mn + mn);
}
// This function checks whether the uplo argument input is valid
// Allowed strings are "u", "U", "l", "L"
static inline void checkUplo(const c10::string_view uplo) {
  // To use std::toupper safely with plain chars (or signed chars), the argument should first be converted to unsigned char
  char uplo_uppercase = static_cast<char>(std::toupper(static_cast<unsigned char>(uplo[0])));
  TORCH_CHECK(uplo.size() == 1 && (uplo_uppercase == 'U' || uplo_uppercase == 'L'),
    "Expected UPLO argument to be 'L' or 'U', but got ", uplo);
}

static inline void checkSameDevice(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
  TORCH_CHECK(
      result.device() == input.device(),
      fn_name,
      ": Expected ", result_name, " and input tensors to be on the same device, but got ",
      result_name, " on ", result.device(), " and input on ", input.device());
}
// Check the dtype of result and input tensors (for _out variants).
// Most linear algebra functions have the same dtype for input and output
// (either floating or complex type input), so we can check whether input's dtype can be casted to result's dtype.
// According to https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
// c10::canCast is used for checking the "safe copy" dtype requirements.
static inline void checkLinalgCompatibleDtype(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
  bool can_cast = c10::canCast(input.scalar_type(), result.scalar_type());
  TORCH_CHECK(
      can_cast,
      fn_name,
      ": Expected ", result_name, " to be safely castable from ", input.scalar_type(), " dtype, but got ",
      result_name, " with dtype ", result.scalar_type());
}

// Alternatively, we can check whether the specific expected output type (result_type) can be safely casted to out tensor dtype (out_type)
static inline void checkLinalgCompatibleDtype(const std::string& fn_name, ScalarType out_type, ScalarType result_type, const std::string& out_name = "result") {
  bool can_cast = c10::canCast(result_type, out_type);
  TORCH_CHECK(
      can_cast,
      fn_name,
      ": Expected ", out_name, " to be safely castable from ", result_type, " dtype, but got ",
      out_name, " with dtype ", out_type);
}

static inline void checkNotComplexTolerance(const Tensor& tol, const c10::string_view f_name, const c10::string_view tol_name) {
  TORCH_CHECK(!at::isComplexType(tol.scalar_type()),
              f_name, ": ", tol_name, " tensor of complex type is not supported. Got ", tol.scalar_type());
}
/*
  Two types of 'other' tensors are supported when solving
  a system of linear equations matmul(input, x) = other:
  * 1-dimensional (1D) tensor or batch of 1D tensors (vector case)
  * 2-dimensional (2D) tensor or batch of 2D tensors (matrix case).
  The original torch.solve supported only the matrix case, while NumPy works for both cases.
  For the batched input we need to be able to distinguish them.
  Let input.shape = (batch_dimensions, m, n), then 'other' is of vector type if other.shape == (batch_dimensions, m).
  This rule is compatible with NumPy, see https://github.com/numpy/numpy/blob/v1.20.0/numpy/linalg/linalg.py#L384-L389
*/
static inline bool linalg_solve_is_vector_rhs(const Tensor& input, const Tensor& other) {
  auto expected_batched_rhs_shape = IntArrayRef(input.sizes().data(), input.dim() - 1); // input.shape[:-1]
  bool vector_case = other.dim() == 1 || (input.dim() - 1 == other.dim() && other.sizes().equals(expected_batched_rhs_shape));
  return vector_case;
}
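// Illustrative sketch (assumed example, not from the original header): with
// input.sizes() == {4, 3, 3}:
//   other.sizes() == {4, 3}    -> true  (batch of 1D right-hand sides)
//   other.sizes() == {4, 3, 2} -> false (batch of 2D right-hand sides)
//   other.sizes() == {3}       -> true  (a single vector is always the vector case)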
/*
  Computes linear indices for a tensor with original_shape to access its elements like it was a materialized broadcast tensor.
*/
static inline Tensor get_linear_indices(int64_t numel, IntArrayRef original_shape, IntArrayRef broadcast_shape) {
  TensorOptions options = at::TensorOptions().dtype(at::kLong).device(at::kCPU);
  return at::arange(numel, options).view(original_shape).broadcast_to(broadcast_shape).contiguous();
}

class BroadcastLinearIndices {
 private:
  Tensor linear_indices_;
  bool is_broadcasting_;

 public:
  BroadcastLinearIndices(
      int64_t numel,
      IntArrayRef original_shape,
      IntArrayRef broadcast_shape) : is_broadcasting_(!original_shape.equals(broadcast_shape)) {
    // The assumption is that the broadcast_shape is a materialized broadcast
    // shape of the original_shape. We need to compute the linear indices
    // compatible with the original_shape to access the elements in the original
    // tensor corresponding to the broadcast tensor.
    if (is_broadcasting_) {
      linear_indices_ =
          get_linear_indices(numel, original_shape, broadcast_shape);
    }
  }
  int64_t operator()(int64_t broadcast_linear_index) {
    return is_broadcasting_
        ? linear_indices_.data_ptr<int64_t>()[broadcast_linear_index]
        : broadcast_linear_index;
  }
};
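// Illustrative sketch (assumed example, not from the original header): for an
// original shape {2, 1} broadcast to {2, 3}, the materialized index map is
// [[0, 0, 0], [1, 1, 1]], so:
//   BroadcastLinearIndices idx(/*numel=*/2, /*original_shape=*/{2, 1}, /*broadcast_shape=*/{2, 3});
//   // idx(0) == 0, idx(2) == 0, idx(4) == 1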
static inline bool is_blas_compatible_column_major_order(const Tensor& input) {
  IntArrayRef input_strides = input.strides();
  IntArrayRef input_sizes = input.sizes();
  auto ndim = input.dim();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
  if (ndim > 3) {
    return input.transpose(-2, -1).is_contiguous();
  }
  auto leading_dimension = input_strides[ndim - 1];
  auto rows = input_sizes[ndim - 2];
  bool batch_stride_compatible = true;
  if (ndim == 3) {
    auto cols = input_sizes[ndim - 1];
    batch_stride_compatible =
        input_strides[ndim - 3] >= leading_dimension * cols;
  }
  return (input_strides[ndim - 2] == 1) &&
      (leading_dimension >= std::max<int64_t>(1, rows)) &&
      batch_stride_compatible;
}
static inline bool is_blas_compatible_row_major_order(const Tensor& input) {
  IntArrayRef input_strides = input.strides();
  IntArrayRef input_sizes = input.sizes();
  auto ndim = input.dim();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
  if (ndim > 3) {
    return input.is_contiguous();
  }
  auto leading_dimension = input_strides[ndim - 2];
  auto cols = input_sizes[ndim - 1];
  bool batch_stride_compatible = true;
  if (ndim == 3) {
    auto rows = input_sizes[ndim - 2];
    batch_stride_compatible =
        input_strides[ndim - 3] >= leading_dimension * rows;
  }
  return (input_strides[ndim - 1] == 1) &&
      (leading_dimension >= std::max<int64_t>(1, cols)) &&
      batch_stride_compatible;
}
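// Illustrative sketch (assumed example, not from the original header): for a
// tensor of shape {2, 3, 4},
//   strides {12, 4, 1} -> row-major compatible    (unit column stride, ld == 4 >= cols,
//                                                  batch stride 12 >= 4 * 3)
//   strides {12, 1, 3} -> column-major compatible (unit row stride, ld == 3 >= rows,
//                                                  batch stride 12 >= 3 * 4)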
}} // namespace at::native