cuda_block_structure.cc

// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Authors: dmitriy.korchemkin@gmail.com (Dmitriy Korchemkin)

#include "ceres/cuda_block_structure.h"

#ifndef CERES_NO_CUDA

namespace ceres::internal {
namespace {
// Dimension of a sorted array of blocks
inline int Dimension(const std::vector<Block>& blocks) {
  if (blocks.empty()) {
    return 0;
  }
  const auto& last = blocks.back();
  return last.size + last.position;
}
}  // namespace
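
// The single-argument overload treats the whole matrix as the F sub-matrix:
// it delegates with num_col_blocks_e = 0, so the E-specific code paths in the
// partitioned constructor below are skipped entirely.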
CudaBlockSparseStructure::CudaBlockSparseStructure(
    const CompressedRowBlockStructure& block_structure, ContextImpl* context)
    : CudaBlockSparseStructure(block_structure, 0, context) {}
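
// Flattens the CPU-side CompressedRowBlockStructure into contiguous host
// arrays (row blocks, column blocks, cells and per-row-block cell offsets),
// and uploads them to device buffers at the end of the constructor body.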
CudaBlockSparseStructure::CudaBlockSparseStructure(
    const CompressedRowBlockStructure& block_structure,
    const int num_col_blocks_e,
    ContextImpl* context)
    : first_cell_in_row_block_(context),
      value_offset_row_block_f_(context),
      cells_(context),
      row_blocks_(context),
      col_blocks_(context) {
  // Row blocks extracted from CompressedRowBlockStructure::rows
  std::vector<Block> row_blocks;
  // Column blocks can be reused as-is
  const auto& col_blocks = block_structure.cols;

  // Index of the first cell belonging to each row block
  std::vector<int> first_cell_in_row_block;
  // Offset of the first value in the first non-empty row-block of the F
  // sub-matrix
  std::vector<int> value_offset_row_block_f;
  // Flat array of all cells from all row-blocks
  std::vector<Cell> cells;

  int f_values_offset = -1;
  num_nonzeros_e_ = 0;
  is_crs_compatible_ = true;
  num_row_blocks_ = block_structure.rows.size();
  num_col_blocks_ = col_blocks.size();

  row_blocks.reserve(num_row_blocks_);
  first_cell_in_row_block.reserve(num_row_blocks_ + 1);
  value_offset_row_block_f.reserve(num_row_blocks_ + 1);
  num_nonzeros_ = 0;
  // Block-sparse matrices produced by the block-Jacobian writer are expected
  // to have a sequential layout (for partitioned matrices, both the E and F
  // sub-matrices are expected to have a sequential layout).
  bool sequential_layout = true;
  int row_block_id = 0;
  num_row_blocks_e_ = 0;
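
  // First pass: row-blocks whose leading cell belongs to the E sub-matrix
  // (block_id < num_col_blocks_e). The pass stops at the first row-block that
  // has no cells or whose first cell already belongs to F.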
  for (; row_block_id < num_row_blocks_; ++row_block_id) {
    const auto& r = block_structure.rows[row_block_id];
    const int row_block_size = r.block.size;
    const int num_cells = r.cells.size();
    if (num_col_blocks_e == 0 || r.cells.size() == 0 ||
        r.cells[0].block_id >= num_col_blocks_e) {
      break;
    }
    num_row_blocks_e_ = row_block_id + 1;
    // Each row-block of the E sub-matrix contains exactly one E cell; since E
    // cells are stored separately from F cells, CRS-compatibility of the F
    // sub-matrix only breaks if there are more than 2 cells in a row-block
    // (that is, more than 1 cell in the F sub-matrix)
    if (num_cells > 2 && row_block_size > 1) {
      is_crs_compatible_ = false;
    }
    row_blocks.emplace_back(r.block);
    first_cell_in_row_block.push_back(cells.size());
    for (int cell_id = 0; cell_id < num_cells; ++cell_id) {
      const auto& c = r.cells[cell_id];
      const int col_block_size = col_blocks[c.block_id].size;
      const int cell_size = col_block_size * row_block_size;
      cells.push_back(c);
      if (cell_id == 0) {
        DCHECK(c.position == num_nonzeros_e_);
        num_nonzeros_e_ += cell_size;
      } else {
        if (f_values_offset == -1) {
          num_nonzeros_ = c.position;
          f_values_offset = c.position;
        }
        sequential_layout &= c.position == num_nonzeros_;
        num_nonzeros_ += cell_size;
        if (cell_id == 1) {
          // Correct value_offset_row_block_f for empty row-blocks of F
          // preceding this one
          for (auto it = value_offset_row_block_f.rbegin();
               it != value_offset_row_block_f.rend();
               ++it) {
            if (*it != -1) break;
            *it = c.position;
          }
          value_offset_row_block_f.push_back(c.position);
        }
      }
    }
    if (num_cells == 1) {
      value_offset_row_block_f.push_back(-1);
    }
  }
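
  // Second pass: the remaining row-blocks contain only F cells (as the DCHECK
  // below verifies, every cell has block_id >= num_col_blocks_e).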
  for (; row_block_id < num_row_blocks_; ++row_block_id) {
    const auto& r = block_structure.rows[row_block_id];
    const int row_block_size = r.block.size;
    const int num_cells = r.cells.size();
    // After num_row_blocks_e_ row-blocks, there should be no cells in the E
    // sub-matrix. Thus CRS-compatibility of the F sub-matrix breaks if there
    // is more than one cell in the row-block
    if (num_cells > 1 && row_block_size > 1) {
      is_crs_compatible_ = false;
    }
    row_blocks.emplace_back(r.block);
    first_cell_in_row_block.push_back(cells.size());
    if (r.cells.empty()) {
      value_offset_row_block_f.push_back(-1);
    } else {
      for (auto it = value_offset_row_block_f.rbegin();
           it != value_offset_row_block_f.rend();
           ++it) {
        if (*it != -1) break;
        *it = r.cells[0].position;
      }
      value_offset_row_block_f.push_back(r.cells[0].position);
    }
    for (const auto& c : r.cells) {
      const int col_block_size = col_blocks[c.block_id].size;
      const int cell_size = col_block_size * row_block_size;
      cells.push_back(c);
      DCHECK(c.block_id >= num_col_blocks_e);
      if (f_values_offset == -1) {
        num_nonzeros_ = c.position;
        f_values_offset = c.position;
      }
      sequential_layout &= c.position == num_nonzeros_;
      num_nonzeros_ += cell_size;
    }
  }
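
  // If no F cell was encountered, the F sub-matrix is empty and its values
  // start right after the E values.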
  if (f_values_offset == -1) {
    f_values_offset = num_nonzeros_e_;
    num_nonzeros_ = num_nonzeros_e_;
  }
  // Fill value offsets for the trailing empty row-blocks of the F sub-matrix
  for (auto it = value_offset_row_block_f.rbegin();
       it != value_offset_row_block_f.rend();
       ++it) {
    if (*it != -1) break;
    *it = num_nonzeros_;
  }
  value_offset_row_block_f.push_back(num_nonzeros_);
  CHECK_EQ(num_nonzeros_e_, f_values_offset);
  first_cell_in_row_block.push_back(cells.size());
  num_cells_ = cells.size();

  num_rows_ = Dimension(row_blocks);
  num_cols_ = Dimension(col_blocks);

  CHECK(sequential_layout);
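
  // With VLOG level 3 or higher, report the GPU memory footprint of the block
  // structure, as a percentage of the size of the equivalent CRS matrix
  // (column indices, values and row offsets).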
  if (VLOG_IS_ON(3)) {
    const size_t first_cell_in_row_block_size =
        first_cell_in_row_block.size() * sizeof(int);
    const size_t cells_size = cells.size() * sizeof(Cell);
    const size_t row_blocks_size = row_blocks.size() * sizeof(Block);
    const size_t col_blocks_size = col_blocks.size() * sizeof(Block);
    const size_t total_size = first_cell_in_row_block_size + cells_size +
                              col_blocks_size + row_blocks_size;
    const double ratio =
        (100. * total_size) / (num_nonzeros_ * (sizeof(int) + sizeof(double)) +
                               num_rows_ * sizeof(int));
    VLOG(3) << "\nCudaBlockSparseStructure:\n"
               "\tRow block offsets: "
            << first_cell_in_row_block_size
            << " bytes\n"
               "\tColumn blocks: "
            << col_blocks_size
            << " bytes\n"
               "\tRow blocks: "
            << row_blocks_size
            << " bytes\n"
               "\tCells: "
            << cells_size << " bytes\n\tTotal: " << total_size
            << " bytes of GPU memory (" << ratio << "% of CRS matrix size)";
  }
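
  // Upload the flattened structure from host vectors to device buffers.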
  first_cell_in_row_block_.CopyFromCpuVector(first_cell_in_row_block);
  cells_.CopyFromCpuVector(cells);
  row_blocks_.CopyFromCpuVector(row_blocks);
  col_blocks_.CopyFromCpuVector(col_blocks);
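
  // Per-row-block F value offsets are only needed for partitioned matrices,
  // so they are transferred only when an E partition is present.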
  if (num_col_blocks_e || num_row_blocks_e_) {
    value_offset_row_block_f_.CopyFromCpuVector(value_offset_row_block_f);
  }
}

}  // namespace ceres::internal

#endif  // CERES_NO_CUDA