// partitioned_matrix_view_impl.h
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include <algorithm>
#include <cstring>
#include <memory>
#include <vector>

#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/parallel_for.h"
#include "ceres/partition_range_for_parallel_for.h"
#include "ceres/partitioned_matrix_view.h"
#include "ceres/small_blas.h"
#include "glog/logging.h"

namespace ceres::internal {
  43. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  44. PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  45. PartitionedMatrixView(const LinearSolver::Options& options,
  46. const BlockSparseMatrix& matrix)
  47. : options_(options), matrix_(matrix) {
  48. const CompressedRowBlockStructure* bs = matrix_.block_structure();
  49. CHECK(bs != nullptr);
  50. num_col_blocks_e_ = options_.elimination_groups[0];
  51. num_col_blocks_f_ = bs->cols.size() - num_col_blocks_e_;
  52. // Compute the number of row blocks in E. The number of row blocks
  53. // in E maybe less than the number of row blocks in the input matrix
  54. // as some of the row blocks at the bottom may not have any
  55. // e_blocks. For a definition of what an e_block is, please see
  56. // schur_complement_solver.h
  57. num_row_blocks_e_ = 0;
  58. for (const auto& row : bs->rows) {
  59. const std::vector<Cell>& cells = row.cells;
  60. if (cells[0].block_id < num_col_blocks_e_) {
  61. ++num_row_blocks_e_;
  62. }
  63. }
  64. // Compute the number of columns in E and F.
  65. num_cols_e_ = 0;
  66. num_cols_f_ = 0;
  67. for (int c = 0; c < bs->cols.size(); ++c) {
  68. const Block& block = bs->cols[c];
  69. if (c < num_col_blocks_e_) {
  70. num_cols_e_ += block.size;
  71. } else {
  72. num_cols_f_ += block.size;
  73. }
  74. }
  75. CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
  76. auto transpose_bs = matrix_.transpose_block_structure();
  77. const int num_threads = options_.num_threads;
  78. if (transpose_bs != nullptr && num_threads > 1) {
  79. int kMaxPartitions = num_threads * 4;
  80. e_cols_partition_ = PartitionRangeForParallelFor(
  81. 0,
  82. num_col_blocks_e_,
  83. kMaxPartitions,
  84. transpose_bs->rows.data(),
  85. [](const CompressedRow& row) { return row.cumulative_nnz; });
  86. f_cols_partition_ = PartitionRangeForParallelFor(
  87. num_col_blocks_e_,
  88. num_col_blocks_e_ + num_col_blocks_f_,
  89. kMaxPartitions,
  90. transpose_bs->rows.data(),
  91. [](const CompressedRow& row) { return row.cumulative_nnz; });
  92. }
  93. }
  94. // The next four methods don't seem to be particularly cache
  95. // friendly. This is an artifact of how the BlockStructure of the
  96. // input matrix is constructed. These methods will benefit from
  97. // multithreading as well as improved data layout.
  98. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  99. void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  100. RightMultiplyAndAccumulateE(const double* x, double* y) const {
  101. // Iterate over the first num_row_blocks_e_ row blocks, and multiply
  102. // by the first cell in each row block.
  103. auto bs = matrix_.block_structure();
  104. const double* values = matrix_.values();
  105. ParallelFor(options_.context,
  106. 0,
  107. num_row_blocks_e_,
  108. options_.num_threads,
  109. [values, bs, x, y](int row_block_id) {
  110. const Cell& cell = bs->rows[row_block_id].cells[0];
  111. const int row_block_pos = bs->rows[row_block_id].block.position;
  112. const int row_block_size = bs->rows[row_block_id].block.size;
  113. const int col_block_id = cell.block_id;
  114. const int col_block_pos = bs->cols[col_block_id].position;
  115. const int col_block_size = bs->cols[col_block_id].size;
  116. // clang-format off
  117. MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
  118. values + cell.position, row_block_size, col_block_size,
  119. x + col_block_pos,
  120. y + row_block_pos);
  121. // clang-format on
  122. });
  123. }
// y += F * x. x indexes only the columns of F, hence the
// `col_block_pos - num_cols_e` shift when reading from it.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    RightMultiplyAndAccumulateF(const double* x, double* y) const {
  // Iterate over row blocks, and if the row block is in E, then
  // multiply by all the cells except the first one which is of type
  // E. If the row block is not in E (i.e its in the bottom
  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
  // are of type F and multiply by them all.
  const CompressedRowBlockStructure* bs = matrix_.block_structure();
  const int num_row_blocks = bs->rows.size();
  // Local copies avoid capturing `this` in the lambdas below.
  const int num_cols_e = num_cols_e_;
  const double* values = matrix_.values();
  // Rows that also contain an E cell: skip cell 0, use the
  // fixed-size F kernel for the rest.
  ParallelFor(options_.context,
              0,
              num_row_blocks_e_,
              options_.num_threads,
              [values, bs, num_cols_e, x, y](int row_block_id) {
                const int row_block_pos = bs->rows[row_block_id].block.position;
                const int row_block_size = bs->rows[row_block_id].block.size;
                const auto& cells = bs->rows[row_block_id].cells;
                for (int c = 1; c < cells.size(); ++c) {
                  const int col_block_id = cells[c].block_id;
                  const int col_block_pos = bs->cols[col_block_id].position;
                  const int col_block_size = bs->cols[col_block_id].size;
                  // clang-format off
                  MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
                      values + cells[c].position, row_block_size, col_block_size,
                      x + col_block_pos - num_cols_e,
                      y + row_block_pos);
                  // clang-format on
                }
              });
  // Rows below the E region: every cell is an F cell. Block sizes here
  // are not covered by the template parameters, so use the dynamic kernel.
  ParallelFor(options_.context,
              num_row_blocks_e_,
              num_row_blocks,
              options_.num_threads,
              [values, bs, num_cols_e, x, y](int row_block_id) {
                const int row_block_pos = bs->rows[row_block_id].block.position;
                const int row_block_size = bs->rows[row_block_id].block.size;
                const auto& cells = bs->rows[row_block_id].cells;
                for (const auto& cell : cells) {
                  const int col_block_id = cell.block_id;
                  const int col_block_pos = bs->cols[col_block_id].position;
                  const int col_block_size = bs->cols[col_block_id].size;
                  // clang-format off
                  MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
                      values + cell.position, row_block_size, col_block_size,
                      x + col_block_pos - num_cols_e,
                      y + row_block_pos);
                  // clang-format on
                }
              });
}
  177. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  178. void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  179. LeftMultiplyAndAccumulateE(const double* x, double* y) const {
  180. if (!num_col_blocks_e_) return;
  181. if (!num_row_blocks_e_) return;
  182. if (options_.num_threads == 1) {
  183. LeftMultiplyAndAccumulateESingleThreaded(x, y);
  184. } else {
  185. CHECK(options_.context != nullptr);
  186. LeftMultiplyAndAccumulateEMultiThreaded(x, y);
  187. }
  188. }
  189. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  190. void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  191. LeftMultiplyAndAccumulateESingleThreaded(const double* x, double* y) const {
  192. const CompressedRowBlockStructure* bs = matrix_.block_structure();
  193. // Iterate over the first num_row_blocks_e_ row blocks, and multiply
  194. // by the first cell in each row block.
  195. const double* values = matrix_.values();
  196. for (int r = 0; r < num_row_blocks_e_; ++r) {
  197. const Cell& cell = bs->rows[r].cells[0];
  198. const int row_block_pos = bs->rows[r].block.position;
  199. const int row_block_size = bs->rows[r].block.size;
  200. const int col_block_id = cell.block_id;
  201. const int col_block_pos = bs->cols[col_block_id].position;
  202. const int col_block_size = bs->cols[col_block_id].size;
  203. // clang-format off
  204. MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
  205. values + cell.position, row_block_size, col_block_size,
  206. x + row_block_pos,
  207. y + col_block_pos);
  208. // clang-format on
  209. }
  210. }
// Multi-threaded y += E' * x, parallelized over the column blocks of E
// using the transpose block structure (whose rows correspond to column
// blocks of the original matrix), so each output slice of y is written
// by exactly one task.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    LeftMultiplyAndAccumulateEMultiThreaded(const double* x, double* y) const {
  auto transpose_bs = matrix_.transpose_block_structure();
  CHECK(transpose_bs != nullptr);
  // Local copies of class members in order to avoid capturing pointer to the
  // whole object in lambda function
  auto values = matrix_.values();
  const int num_row_blocks_e = num_row_blocks_e_;
  ParallelFor(
      options_.context,
      0,
      num_col_blocks_e_,
      options_.num_threads,
      [values, transpose_bs, num_row_blocks_e, x, y](int row_block_id) {
        // row_block_id indexes rows of the transpose structure, i.e.
        // column blocks of the original matrix.
        int row_block_pos = transpose_bs->rows[row_block_id].block.position;
        int row_block_size = transpose_bs->rows[row_block_id].block.size;
        auto& cells = transpose_bs->rows[row_block_id].cells;
        for (auto& cell : cells) {
          const int col_block_id = cell.block_id;
          const int col_block_size = transpose_bs->cols[col_block_id].size;
          const int col_block_pos = transpose_bs->cols[col_block_id].position;
          // Only the first num_row_blocks_e row blocks of the original
          // matrix contain E cells. The early break presumes cells are
          // ordered by block_id -- NOTE(review): confirm against the
          // construction of the transpose block structure.
          if (col_block_id >= num_row_blocks_e) break;
          MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
              values + cell.position,
              col_block_size,
              row_block_size,
              x + col_block_pos,
              y + row_block_pos);
        }
      },
      e_cols_partition());
}
  244. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  245. void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  246. LeftMultiplyAndAccumulateF(const double* x, double* y) const {
  247. if (!num_col_blocks_f_) return;
  248. if (options_.num_threads == 1) {
  249. LeftMultiplyAndAccumulateFSingleThreaded(x, y);
  250. } else {
  251. CHECK(options_.context != nullptr);
  252. LeftMultiplyAndAccumulateFMultiThreaded(x, y);
  253. }
  254. }
// Single-threaded y += F' * x. y indexes only the columns of F, hence
// the `col_block_pos - num_cols_e_` shift when writing to it.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    LeftMultiplyAndAccumulateFSingleThreaded(const double* x, double* y) const {
  const CompressedRowBlockStructure* bs = matrix_.block_structure();
  // Iterate over row blocks, and if the row block is in E, then
  // multiply by all the cells except the first one which is of type
  // E. If the row block is not in E (i.e its in the bottom
  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
  // are of type F and multiply by them all.
  const double* values = matrix_.values();
  for (int r = 0; r < num_row_blocks_e_; ++r) {
    const int row_block_pos = bs->rows[r].block.position;
    const int row_block_size = bs->rows[r].block.size;
    const std::vector<Cell>& cells = bs->rows[r].cells;
    // Cell 0 is the E cell; the remaining cells are F cells with the
    // statically known F block size.
    for (int c = 1; c < cells.size(); ++c) {
      const int col_block_id = cells[c].block_id;
      const int col_block_pos = bs->cols[col_block_id].position;
      const int col_block_size = bs->cols[col_block_id].size;
      // clang-format off
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + cells[c].position, row_block_size, col_block_size,
          x + row_block_pos,
          y + col_block_pos - num_cols_e_);
      // clang-format on
    }
  }
  // Rows below the E region: all cells are F cells, but their sizes are
  // not covered by the template parameters, so use the dynamic kernel.
  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
    const int row_block_pos = bs->rows[r].block.position;
    const int row_block_size = bs->rows[r].block.size;
    const std::vector<Cell>& cells = bs->rows[r].cells;
    for (const auto& cell : cells) {
      const int col_block_id = cell.block_id;
      const int col_block_pos = bs->cols[col_block_id].position;
      const int col_block_size = bs->cols[col_block_id].size;
      // clang-format off
      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
          values + cell.position, row_block_size, col_block_size,
          x + row_block_pos,
          y + col_block_pos - num_cols_e_);
      // clang-format on
    }
  }
}
// Multi-threaded y += F' * x, parallelized over the column blocks of F
// using the transpose block structure; each task writes a disjoint
// slice of y (its own column block, shifted by num_cols_e).
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    LeftMultiplyAndAccumulateFMultiThreaded(const double* x, double* y) const {
  auto transpose_bs = matrix_.transpose_block_structure();
  CHECK(transpose_bs != nullptr);
  // Local copies of class members in order to avoid capturing pointer to the
  // whole object in lambda function
  auto values = matrix_.values();
  const int num_row_blocks_e = num_row_blocks_e_;
  const int num_cols_e = num_cols_e_;
  ParallelFor(
      options_.context,
      num_col_blocks_e_,
      num_col_blocks_e_ + num_col_blocks_f_,
      options_.num_threads,
      [values, transpose_bs, num_row_blocks_e, num_cols_e, x, y](
          int row_block_id) {
        int row_block_pos = transpose_bs->rows[row_block_id].block.position;
        int row_block_size = transpose_bs->rows[row_block_id].block.size;
        auto& cells = transpose_bs->rows[row_block_id].cells;
        const int num_cells = cells.size();
        int cell_idx = 0;
        // First loop: cells in rows that belong to the E region use the
        // statically sized kernel. The break presumes cells are ordered
        // by block_id -- NOTE(review): confirm against the construction
        // of the transpose block structure.
        for (; cell_idx < num_cells; ++cell_idx) {
          auto& cell = cells[cell_idx];
          const int col_block_id = cell.block_id;
          const int col_block_size = transpose_bs->cols[col_block_id].size;
          const int col_block_pos = transpose_bs->cols[col_block_id].position;
          if (col_block_id >= num_row_blocks_e) break;
          MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
              values + cell.position,
              col_block_size,
              row_block_size,
              x + col_block_pos,
              y + row_block_pos - num_cols_e);
        }
        // Second loop: remaining cells fall below the E region; their
        // sizes are not covered by the template parameters, so use the
        // dynamic kernel.
        for (; cell_idx < num_cells; ++cell_idx) {
          auto& cell = cells[cell_idx];
          const int col_block_id = cell.block_id;
          const int col_block_size = transpose_bs->cols[col_block_id].size;
          const int col_block_pos = transpose_bs->cols[col_block_id].position;
          MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + cell.position,
              col_block_size,
              row_block_size,
              x + col_block_pos,
              y + row_block_pos - num_cols_e);
        }
      },
      f_cols_partition());
}
  348. // Given a range of columns blocks of a matrix m, compute the block
  349. // structure of the block diagonal of the matrix m(:,
  350. // start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
  351. // and return a BlockSparseMatrix with this block structure. The
  352. // caller owns the result.
  353. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  354. std::unique_ptr<BlockSparseMatrix>
  355. PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  356. CreateBlockDiagonalMatrixLayout(int start_col_block,
  357. int end_col_block) const {
  358. const CompressedRowBlockStructure* bs = matrix_.block_structure();
  359. auto* block_diagonal_structure = new CompressedRowBlockStructure;
  360. int block_position = 0;
  361. int diagonal_cell_position = 0;
  362. // Iterate over the column blocks, creating a new diagonal block for
  363. // each column block.
  364. for (int c = start_col_block; c < end_col_block; ++c) {
  365. const Block& block = bs->cols[c];
  366. block_diagonal_structure->cols.emplace_back();
  367. Block& diagonal_block = block_diagonal_structure->cols.back();
  368. diagonal_block.size = block.size;
  369. diagonal_block.position = block_position;
  370. block_diagonal_structure->rows.emplace_back();
  371. CompressedRow& row = block_diagonal_structure->rows.back();
  372. row.block = diagonal_block;
  373. row.cells.emplace_back();
  374. Cell& cell = row.cells.back();
  375. cell.block_id = c - start_col_block;
  376. cell.position = diagonal_cell_position;
  377. block_position += block.size;
  378. diagonal_cell_position += block.size * block.size;
  379. }
  380. // Build a BlockSparseMatrix with the just computed block
  381. // structure.
  382. return std::make_unique<BlockSparseMatrix>(block_diagonal_structure);
  383. }
  384. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  385. std::unique_ptr<BlockSparseMatrix>
  386. PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  387. CreateBlockDiagonalEtE() const {
  388. std::unique_ptr<BlockSparseMatrix> block_diagonal =
  389. CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
  390. UpdateBlockDiagonalEtE(block_diagonal.get());
  391. return block_diagonal;
  392. }
  393. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  394. std::unique_ptr<BlockSparseMatrix>
  395. PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  396. CreateBlockDiagonalFtF() const {
  397. std::unique_ptr<BlockSparseMatrix> block_diagonal =
  398. CreateBlockDiagonalMatrixLayout(num_col_blocks_e_,
  399. num_col_blocks_e_ + num_col_blocks_f_);
  400. UpdateBlockDiagonalFtF(block_diagonal.get());
  401. return block_diagonal;
  402. }
// Similar to the code in RightMultiplyAndAccumulateE, except instead of the
// matrix vector multiply its an outer product.
//
//   block_diagonal = block_diagonal(E'E)
//
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    UpdateBlockDiagonalEtESingleThreaded(
        BlockSparseMatrix* block_diagonal) const {
  auto bs = matrix_.block_structure();
  auto block_diagonal_structure = block_diagonal->block_structure();

  // Each E cell contributes its outer product to the diagonal block of
  // its column block, so clear the output before accumulating.
  block_diagonal->SetZero();
  const double* values = matrix_.values();
  for (int r = 0; r < num_row_blocks_e_; ++r) {
    // Cell 0 of each of the first num_row_blocks_e_ rows is the E cell.
    const Cell& cell = bs->rows[r].cells[0];
    const int row_block_size = bs->rows[r].block.size;
    const int block_id = cell.block_id;
    const int col_block_size = bs->cols[block_id].size;
    const int cell_position =
        block_diagonal_structure->rows[block_id].cells[0].position;
    // clang-format off
    MatrixTransposeMatrixMultiply
        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
            values + cell.position, row_block_size, col_block_size,
            values + cell.position, row_block_size, col_block_size,
            block_diagonal->mutable_values() + cell_position,
            0, 0, col_block_size, col_block_size);
    // clang-format on
  }
}
// Multi-threaded block_diagonal = block_diagonal(E'E), parallelized
// over the column blocks of E. Each task owns a single diagonal cell
// (zeroing it, then accumulating into it), so no synchronization is
// needed between tasks.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    UpdateBlockDiagonalEtEMultiThreaded(
        BlockSparseMatrix* block_diagonal) const {
  auto transpose_block_structure = matrix_.transpose_block_structure();
  CHECK(transpose_block_structure != nullptr);
  auto block_diagonal_structure = block_diagonal->block_structure();

  // Raw pointers captured instead of `this` to keep the lambda cheap.
  const double* values = matrix_.values();
  double* values_diagonal = block_diagonal->mutable_values();
  ParallelFor(
      options_.context,
      0,
      num_col_blocks_e_,
      options_.num_threads,
      [values,
       transpose_block_structure,
       values_diagonal,
       block_diagonal_structure](int col_block_id) {
        int cell_position =
            block_diagonal_structure->rows[col_block_id].cells[0].position;
        double* cell_values = values_diagonal + cell_position;
        int col_block_size =
            transpose_block_structure->rows[col_block_id].block.size;
        auto& cells = transpose_block_structure->rows[col_block_id].cells;
        MatrixRef(cell_values, col_block_size, col_block_size).setZero();
        // Accumulate the outer product of every cell in this column block.
        for (auto& c : cells) {
          int row_block_size = transpose_block_structure->cols[c.block_id].size;
          // clang-format off
          MatrixTransposeMatrixMultiply<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
              values + c.position, row_block_size, col_block_size,
              values + c.position, row_block_size, col_block_size,
              cell_values, 0, 0, col_block_size, col_block_size);
          // clang-format on
        }
      },
      e_cols_partition_);
}
  470. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  471. void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  472. UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const {
  473. if (options_.num_threads == 1) {
  474. UpdateBlockDiagonalEtESingleThreaded(block_diagonal);
  475. } else {
  476. CHECK(options_.context != nullptr);
  477. UpdateBlockDiagonalEtEMultiThreaded(block_diagonal);
  478. }
  479. }
// Similar to the code in RightMultiplyAndAccumulateF, except instead of the
// matrix vector multiply its an outer product.
//
//   block_diagonal = block_diagonal(F'F)
//
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    UpdateBlockDiagonalFtFSingleThreaded(
        BlockSparseMatrix* block_diagonal) const {
  auto bs = matrix_.block_structure();
  auto block_diagonal_structure = block_diagonal->block_structure();

  // Every F cell accumulates into the diagonal block of its column
  // block, so clear the output first.
  block_diagonal->SetZero();
  const double* values = matrix_.values();
  // Rows that also contain an E cell: skip cell 0 and use the
  // statically sized F kernel for the remaining cells.
  for (int r = 0; r < num_row_blocks_e_; ++r) {
    const int row_block_size = bs->rows[r].block.size;
    const std::vector<Cell>& cells = bs->rows[r].cells;
    for (int c = 1; c < cells.size(); ++c) {
      const int col_block_id = cells[c].block_id;
      const int col_block_size = bs->cols[col_block_id].size;
      // Diagonal blocks are indexed relative to the start of F.
      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
      const int cell_position =
          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
      // clang-format off
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + cells[c].position, row_block_size, col_block_size,
              values + cells[c].position, row_block_size, col_block_size,
              block_diagonal->mutable_values() + cell_position,
              0, 0, col_block_size, col_block_size);
      // clang-format on
    }
  }
  // Rows below the E region: all cells are F cells; their sizes are not
  // covered by the template parameters, so use the dynamic kernel.
  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
    const int row_block_size = bs->rows[r].block.size;
    const std::vector<Cell>& cells = bs->rows[r].cells;
    for (const auto& cell : cells) {
      const int col_block_id = cell.block_id;
      const int col_block_size = bs->cols[col_block_id].size;
      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
      const int cell_position =
          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
      // clang-format off
      MatrixTransposeMatrixMultiply
          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + cell.position, row_block_size, col_block_size,
              values + cell.position, row_block_size, col_block_size,
              block_diagonal->mutable_values() + cell_position,
              0, 0, col_block_size, col_block_size);
      // clang-format on
    }
  }
}
// Multi-threaded block_diagonal = block_diagonal(F'F), parallelized
// over the column blocks of F. Each task owns one diagonal cell
// (zeroing it, then accumulating), so tasks never write to the same
// memory.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
    UpdateBlockDiagonalFtFMultiThreaded(
        BlockSparseMatrix* block_diagonal) const {
  auto transpose_block_structure = matrix_.transpose_block_structure();
  CHECK(transpose_block_structure != nullptr);
  auto block_diagonal_structure = block_diagonal->block_structure();

  // Local copies / raw pointers avoid capturing `this` in the lambda.
  const double* values = matrix_.values();
  double* values_diagonal = block_diagonal->mutable_values();
  const int num_col_blocks_e = num_col_blocks_e_;
  const int num_row_blocks_e = num_row_blocks_e_;
  ParallelFor(
      options_.context,
      num_col_blocks_e_,
      num_col_blocks_e + num_col_blocks_f_,
      options_.num_threads,
      [transpose_block_structure,
       block_diagonal_structure,
       num_col_blocks_e,
       num_row_blocks_e,
       values,
       values_diagonal](int col_block_id) {
        const int col_block_size =
            transpose_block_structure->rows[col_block_id].block.size;
        // Diagonal blocks are indexed relative to the start of F.
        const int diagonal_block_id = col_block_id - num_col_blocks_e;
        const int cell_position =
            block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
        double* cell_values = values_diagonal + cell_position;

        MatrixRef(cell_values, col_block_size, col_block_size).setZero();

        auto& cells = transpose_block_structure->rows[col_block_id].cells;
        const int num_cells = cells.size();
        int i = 0;
        // First loop: cells whose rows lie in the E region use the
        // statically sized kernel. The break presumes cells are ordered
        // by block_id -- NOTE(review): confirm against the construction
        // of the transpose block structure.
        for (; i < num_cells; ++i) {
          auto& cell = cells[i];
          const int row_block_id = cell.block_id;
          if (row_block_id >= num_row_blocks_e) break;
          const int row_block_size =
              transpose_block_structure->cols[row_block_id].size;
          // clang-format off
          MatrixTransposeMatrixMultiply
              <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                  values + cell.position, row_block_size, col_block_size,
                  values + cell.position, row_block_size, col_block_size,
                  cell_values, 0, 0, col_block_size, col_block_size);
          // clang-format on
        }
        // Second loop: remaining cells fall below the E region; use the
        // dynamic kernel.
        for (; i < num_cells; ++i) {
          auto& cell = cells[i];
          const int row_block_id = cell.block_id;
          const int row_block_size =
              transpose_block_structure->cols[row_block_id].size;
          // clang-format off
          MatrixTransposeMatrixMultiply
              <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                  values + cell.position, row_block_size, col_block_size,
                  values + cell.position, row_block_size, col_block_size,
                  cell_values, 0, 0, col_block_size, col_block_size);
          // clang-format on
        }
      },
      f_cols_partition_);
}
  594. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
  595. void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
  596. UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
  597. if (options_.num_threads == 1) {
  598. UpdateBlockDiagonalFtFSingleThreaded(block_diagonal);
  599. } else {
  600. CHECK(options_.context != nullptr);
  601. UpdateBlockDiagonalFtFMultiThreaded(block_diagonal);
  602. }
  603. }
}  // namespace ceres::internal