// File: implicit_schur_complement.cc
  1. // Ceres Solver - A fast non-linear least squares minimizer
  2. // Copyright 2023 Google Inc. All rights reserved.
  3. // http://ceres-solver.org/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are met:
  7. //
  8. // * Redistributions of source code must retain the above copyright notice,
  9. // this list of conditions and the following disclaimer.
  10. // * Redistributions in binary form must reproduce the above copyright notice,
  11. // this list of conditions and the following disclaimer in the documentation
  12. // and/or other materials provided with the distribution.
  13. // * Neither the name of Google Inc. nor the names of its contributors may be
  14. // used to endorse or promote products derived from this software without
  15. // specific prior written permission.
  16. //
  17. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  18. // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  19. // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  20. // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  21. // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  22. // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  23. // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  24. // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  25. // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  26. // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  27. // POSSIBILITY OF SUCH DAMAGE.
  28. //
  29. // Author: sameeragarwal@google.com (Sameer Agarwal)
  30. #include "ceres/implicit_schur_complement.h"
  31. #include "Eigen/Dense"
  32. #include "ceres/block_sparse_matrix.h"
  33. #include "ceres/block_structure.h"
  34. #include "ceres/internal/eigen.h"
  35. #include "ceres/linear_solver.h"
  36. #include "ceres/parallel_for.h"
  37. #include "ceres/parallel_vector_ops.h"
  38. #include "ceres/types.h"
  39. #include "glog/logging.h"
  40. namespace ceres::internal {
// Stores the solver options; all per-problem setup (which is comparatively
// heavy) is deferred to Init().
ImplicitSchurComplement::ImplicitSchurComplement(
    const LinearSolver::Options& options)
    : options_(options) {}
// Prepares the operator for the matrix A, the optional diagonal D (nullptr
// means no diagonal/regularization term) and the right hand side b. Builds or
// refreshes the inverted block diagonals of E'E (and, when needed, F'F) and
// computes the right hand side of the Schur complement system.
void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
                                   const double* D,
                                   const double* b) {
  // Since initialization is reasonably heavy, perhaps we can save on
  // constructing a new object everytime.
  if (A_ == nullptr) {
    A_ = PartitionedMatrixViewBase::Create(options_, A);
  }

  D_ = D;
  b_ = b;

  // (F'F)^-1 is only needed when using the JACOBI preconditioner, the
  // SCHUR_POWER_SERIES_EXPANSION preconditioner, or power-series based
  // initialization; skip the extra work otherwise.
  compute_ftf_inverse_ =
      options_.use_spse_initialization ||
      options_.preconditioner_type == JACOBI ||
      options_.preconditioner_type == SCHUR_POWER_SERIES_EXPANSION;

  // Initialize temporary storage and compute the block diagonals of
  // E'E and F'F.
  if (block_diagonal_EtE_inverse_ == nullptr) {
    // First call: allocate the block-diagonal matrices and the scratch
    // vectors sized to the partitioned view of A.
    block_diagonal_EtE_inverse_ = A_->CreateBlockDiagonalEtE();
    if (compute_ftf_inverse_) {
      block_diagonal_FtF_inverse_ = A_->CreateBlockDiagonalFtF();
    }
    rhs_.resize(A_->num_cols_f());
    rhs_.setZero();
    tmp_rows_.resize(A_->num_rows());
    tmp_e_cols_.resize(A_->num_cols_e());
    tmp_e_cols_2_.resize(A_->num_cols_e());
    tmp_f_cols_.resize(A_->num_cols_f());
  } else {
    // Subsequent calls: the structure is unchanged, only refresh the
    // numeric values of the block diagonals.
    A_->UpdateBlockDiagonalEtE(block_diagonal_EtE_inverse_.get());
    if (compute_ftf_inverse_) {
      A_->UpdateBlockDiagonalFtF(block_diagonal_FtF_inverse_.get());
    }
  }

  // The block diagonals of the augmented linear system contain
  // contributions from the diagonal D if it is non-null. Add that to
  // the block diagonals and invert them.
  AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
  if (compute_ftf_inverse_) {
    // The entries of D corresponding to the F columns start after the
    // num_cols_e() entries used by the E columns.
    AddDiagonalAndInvert((D_ == nullptr) ? nullptr : D_ + A_->num_cols_e(),
                         block_diagonal_FtF_inverse_.get());
  }

  // Compute the RHS of the Schur complement system.
  UpdateRhs();
}
// Evaluate the product
//
//   Sx = [F'F - F'E (E'E)^-1 E'F]x
//
// (plus the D^2 contribution on the F columns when D is present) by breaking
// it down into individual matrix vector products involving the matrices E and
// F. This is implemented using a PartitionedMatrixView of the input matrix A.
void ImplicitSchurComplement::RightMultiplyAndAccumulate(const double* x,
                                                         double* y) const {
  // y1 = F x
  ParallelSetZero(options_.context, options_.num_threads, tmp_rows_);
  A_->RightMultiplyAndAccumulateF(x, tmp_rows_.data());

  // y2 = E' y1
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_);
  A_->LeftMultiplyAndAccumulateE(tmp_rows_.data(), tmp_e_cols_.data());

  // y3 = -(E'E)^-1 y2
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_2_);
  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(tmp_e_cols_.data(),
                                                          tmp_e_cols_2_.data(),
                                                          options_.context,
                                                          options_.num_threads);
  // Negate in place to get y3 from (E'E)^-1 y2.
  ParallelAssign(
      options_.context, options_.num_threads, tmp_e_cols_2_, -tmp_e_cols_2_);

  // y1 = y1 + E y3  (accumulates into tmp_rows_, which still holds F x)
  A_->RightMultiplyAndAccumulateE(tmp_e_cols_2_.data(), tmp_rows_.data());

  // y5 = D^2 x restricted to the F columns; note the entries of D are
  // squared before multiplying. Zero when D is absent.
  if (D_ != nullptr) {
    ConstVectorRef Dref(D_ + A_->num_cols_e(), num_cols());
    VectorRef y_cols(y, num_cols());
    ParallelAssign(
        options_.context,
        options_.num_threads,
        y_cols,
        (Dref.array().square() * ConstVectorRef(x, num_cols()).array()));
  } else {
    ParallelSetZero(options_.context, options_.num_threads, y, num_cols());
  }

  // y = y5 + F' y1
  A_->LeftMultiplyAndAccumulateF(tmp_rows_.data(), y);
}
// Accumulates y += (F'F)^-1 F'E (E'E)^-1 E'F x, the operator whose powers
// appear in the power series expansion of the inverse Schur complement.
// Requires Init() to have been called with compute_ftf_inverse_ enabled.
void ImplicitSchurComplement::InversePowerSeriesOperatorRightMultiplyAccumulate(
    const double* x, double* y) const {
  CHECK(compute_ftf_inverse_);
  // y1 = F x
  ParallelSetZero(options_.context, options_.num_threads, tmp_rows_);
  A_->RightMultiplyAndAccumulateF(x, tmp_rows_.data());

  // y2 = E' y1
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_);
  A_->LeftMultiplyAndAccumulateE(tmp_rows_.data(), tmp_e_cols_.data());

  // y3 = (E'E)^-1 y2
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_2_);
  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(tmp_e_cols_.data(),
                                                          tmp_e_cols_2_.data(),
                                                          options_.context,
                                                          options_.num_threads);
  // y1 = E y3  (tmp_rows_ is cleared first, unlike in
  // RightMultiplyAndAccumulate where the product accumulates)
  ParallelSetZero(options_.context, options_.num_threads, tmp_rows_);
  A_->RightMultiplyAndAccumulateE(tmp_e_cols_2_.data(), tmp_rows_.data());

  // y4 = F' y1
  ParallelSetZero(options_.context, options_.num_threads, tmp_f_cols_);
  A_->LeftMultiplyAndAccumulateF(tmp_rows_.data(), tmp_f_cols_.data());

  // y += (F'F)^-1 y4
  block_diagonal_FtF_inverse_->RightMultiplyAndAccumulate(
      tmp_f_cols_.data(), y, options_.context, options_.num_threads);
}
  153. // Given a block diagonal matrix and an optional array of diagonal
  154. // entries D, add them to the diagonal of the matrix and compute the
  155. // inverse of each diagonal block.
  156. void ImplicitSchurComplement::AddDiagonalAndInvert(
  157. const double* D, BlockSparseMatrix* block_diagonal) {
  158. const CompressedRowBlockStructure* block_diagonal_structure =
  159. block_diagonal->block_structure();
  160. ParallelFor(options_.context,
  161. 0,
  162. block_diagonal_structure->rows.size(),
  163. options_.num_threads,
  164. [block_diagonal_structure, D, block_diagonal](int row_block_id) {
  165. auto& row = block_diagonal_structure->rows[row_block_id];
  166. const int row_block_pos = row.block.position;
  167. const int row_block_size = row.block.size;
  168. const Cell& cell = row.cells[0];
  169. MatrixRef m(block_diagonal->mutable_values() + cell.position,
  170. row_block_size,
  171. row_block_size);
  172. if (D != nullptr) {
  173. ConstVectorRef d(D + row_block_pos, row_block_size);
  174. m += d.array().square().matrix().asDiagonal();
  175. }
  176. m = m.selfadjointView<Eigen::Upper>().llt().solve(
  177. Matrix::Identity(row_block_size, row_block_size));
  178. });
  179. }
// Similar to RightMultiplyAndAccumulate, use the block structure of the matrix
// A to compute y = (E'E)^-1 (E'b - E'F x).
//
// x is the solution of the Schur complement system (the F block); the full
// solution vector y receives the back-substituted E block followed by a copy
// of x in the F block.
void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
  const int num_cols_e = A_->num_cols_e();
  const int num_cols_f = A_->num_cols_f();
  const int num_cols = A_->num_cols();
  const int num_rows = A_->num_rows();

  // y1 = F x
  ParallelSetZero(options_.context, options_.num_threads, tmp_rows_);
  A_->RightMultiplyAndAccumulateF(x, tmp_rows_.data());

  // y2 = b - y1
  ParallelAssign(options_.context,
                 options_.num_threads,
                 tmp_rows_,
                 ConstVectorRef(b_, num_rows) - tmp_rows_);

  // y3 = E' y2
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_);
  A_->LeftMultiplyAndAccumulateE(tmp_rows_.data(), tmp_e_cols_.data());

  // y = (E'E)^-1 y3  (all of y is zeroed first; only the leading
  // num_cols_e entries are written by this product)
  ParallelSetZero(options_.context, options_.num_threads, y, num_cols);
  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(
      tmp_e_cols_.data(), y, options_.context, options_.num_threads);

  // The full solution vector y has two blocks. The first block of
  // variables corresponds to the eliminated variables, which we just
  // computed via back substitution. The second block of variables
  // corresponds to the Schur complement system, so we just copy those
  // values from the solution to the Schur complement.
  VectorRef y_cols_f(y + num_cols_e, num_cols_f);
  ParallelAssign(options_.context,
                 options_.num_threads,
                 y_cols_f,
                 ConstVectorRef(x, num_cols_f));
}
// Compute the RHS of the Schur complement system.
//
//   rhs = F'b - F'E (E'E)^-1 E'b
//
// Like BackSubstitute, we use the block structure of A to implement
// this using a series of matrix vector products.
void ImplicitSchurComplement::UpdateRhs() {
  // y1 = E'b
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_);
  A_->LeftMultiplyAndAccumulateE(b_, tmp_e_cols_.data());

  // y2 = (E'E)^-1 y1
  ParallelSetZero(options_.context, options_.num_threads, tmp_e_cols_2_);
  block_diagonal_EtE_inverse_->RightMultiplyAndAccumulate(tmp_e_cols_.data(),
                                                          tmp_e_cols_2_.data(),
                                                          options_.context,
                                                          options_.num_threads);

  // y3 = E y2
  ParallelSetZero(options_.context, options_.num_threads, tmp_rows_);
  A_->RightMultiplyAndAccumulateE(tmp_e_cols_2_.data(), tmp_rows_.data());

  // y3 = b - y3
  ParallelAssign(options_.context,
                 options_.num_threads,
                 tmp_rows_,
                 ConstVectorRef(b_, A_->num_rows()) - tmp_rows_);

  // rhs = F' y3
  ParallelSetZero(options_.context, options_.num_threads, rhs_);
  A_->LeftMultiplyAndAccumulateF(tmp_rows_.data(), rhs_.data());
}
  241. } // namespace ceres::internal