block_sparse_matrix_test.cc

// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/block_sparse_matrix.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <vector>

#include "ceres/casts.h"
#include "ceres/context_impl.h"
#include "ceres/crs_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
#include "gtest/gtest.h"

namespace ceres {
namespace internal {

namespace {
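// Constructs one of three small, hard-coded block sparse test matrices,
// selected by id; returns nullptr for an unrecognized id. The dense and
// compressed row sparse forms of these matrices are checked explicitly by the
// ToDenseMatrix / ToCRSMatrix / ToCRSMatrixTranspose tests below.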
std::unique_ptr<BlockSparseMatrix> CreateTestMatrixFromId(int id) {
  if (id == 0) {
    // Create the following block sparse matrix:
    // [ 1 2 0 0 0 0 ]
    // [ 3 4 0 0 0 0 ]
    // [ 0 0 5 6 7 0 ]
    // [ 0 0 8 9 10 0 ]
    CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
    bs->cols = {
        // Block size 2, position 0.
        Block(2, 0),
        // Block size 3, position 2.
        Block(3, 2),
        // Block size 1, position 5.
        Block(1, 5),
    };
    bs->rows = {CompressedRow(1), CompressedRow(1)};
    bs->rows[0].block = Block(2, 0);
    bs->rows[0].cells = {Cell(0, 0)};
    bs->rows[1].block = Block(2, 2);
    bs->rows[1].cells = {Cell(1, 4)};
    auto m = std::make_unique<BlockSparseMatrix>(bs);
    EXPECT_NE(m, nullptr);
    EXPECT_EQ(m->num_rows(), 4);
    EXPECT_EQ(m->num_cols(), 6);
    EXPECT_EQ(m->num_nonzeros(), 10);
    double* values = m->mutable_values();
    for (int i = 0; i < 10; ++i) {
      values[i] = i + 1;
    }
    return m;
  } else if (id == 1) {
    // Create the following block sparse matrix:
    // [ 1 2 0 5 6 0 ]
    // [ 3 4 0 7 8 0 ]
    // [ 0 0 9 0 0 0 ]
    CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
    bs->cols = {
        // Block size 2, position 0.
        Block(2, 0),
        // Block size 1, position 2.
        Block(1, 2),
        // Block size 2, position 3.
        Block(2, 3),
        // Block size 1, position 5.
        Block(1, 5),
    };
    bs->rows = {CompressedRow(2), CompressedRow(1)};
    bs->rows[0].block = Block(2, 0);
    bs->rows[0].cells = {Cell(0, 0), Cell(2, 4)};
    bs->rows[1].block = Block(1, 2);
    bs->rows[1].cells = {Cell(1, 8)};
    auto m = std::make_unique<BlockSparseMatrix>(bs);
    EXPECT_NE(m, nullptr);
    EXPECT_EQ(m->num_rows(), 3);
    EXPECT_EQ(m->num_cols(), 6);
    EXPECT_EQ(m->num_nonzeros(), 9);
    double* values = m->mutable_values();
    for (int i = 0; i < 9; ++i) {
      values[i] = i + 1;
    }
    return m;
  } else if (id == 2) {
    // Create the following block sparse matrix:
    // [ 1 2 0 | 6 7 0 ]
    // [ 3 4 0 | 8 9 0 ]
    // [ 0 0 5 | 0 0 10]
    // with cells of the left submatrix preceding cells of the right submatrix.
    CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
    bs->cols = {
        // Block size 2, position 0.
        Block(2, 0),
        // Block size 1, position 2.
        Block(1, 2),
        // Block size 2, position 3.
        Block(2, 3),
        // Block size 1, position 5.
        Block(1, 5),
    };
    bs->rows = {CompressedRow(2), CompressedRow(1)};
    bs->rows[0].block = Block(2, 0);
    bs->rows[0].cells = {Cell(0, 0), Cell(2, 5)};
    bs->rows[1].block = Block(1, 2);
    bs->rows[1].cells = {Cell(1, 4), Cell(3, 9)};
    auto m = std::make_unique<BlockSparseMatrix>(bs);
    EXPECT_NE(m, nullptr);
    EXPECT_EQ(m->num_rows(), 3);
    EXPECT_EQ(m->num_cols(), 6);
    EXPECT_EQ(m->num_nonzeros(), 10);
    double* values = m->mutable_values();
    for (int i = 0; i < 10; ++i) {
      values[i] = i + 1;
    }
    return m;
  }
  return nullptr;
}

}  // namespace

const int kNumThreads = 4;
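
// The fixture loads the same underlying matrix in two representations: a_ as
// a BlockSparseMatrix (problem id 2) and b_ as a TripletSparseMatrix (problem
// id 1), so the block sparse operations can be checked against the triplet
// reference. c_ is a larger random matrix used by the parallel and column
// scaling tests.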
class BlockSparseMatrixTest : public ::testing::Test {
 protected:
  void SetUp() final {
    std::unique_ptr<LinearLeastSquaresProblem> problem =
        CreateLinearLeastSquaresProblemFromId(2);
    CHECK(problem != nullptr);
    a_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));

    problem = CreateLinearLeastSquaresProblemFromId(1);
    CHECK(problem != nullptr);
    b_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));

    CHECK_EQ(a_->num_rows(), b_->num_rows());
    CHECK_EQ(a_->num_cols(), b_->num_cols());
    CHECK_EQ(a_->num_nonzeros(), b_->num_nonzeros());
    context_.EnsureMinimumThreads(kNumThreads);

    BlockSparseMatrix::RandomMatrixOptions options;
    options.num_row_blocks = 1000;
    options.min_row_block_size = 1;
    options.max_row_block_size = 8;
    options.num_col_blocks = 100;
    options.min_col_block_size = 1;
    options.max_col_block_size = 8;
    options.block_density = 0.05;
    std::mt19937 rng;
    c_ = BlockSparseMatrix::CreateRandomMatrix(options, rng);
  }

  std::unique_ptr<BlockSparseMatrix> a_;
  std::unique_ptr<TripletSparseMatrix> b_;
  std::unique_ptr<BlockSparseMatrix> c_;
  ContextImpl context_;
};

TEST_F(BlockSparseMatrixTest, SetZeroTest) {
  a_->SetZero();
  EXPECT_EQ(13, a_->num_nonzeros());
}

TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateTest) {
  Vector y_a = Vector::Zero(a_->num_rows());
  Vector y_b = Vector::Zero(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateParallelTest) {
  Vector y_0 = Vector::Random(a_->num_rows());
  Vector y_s = y_0;
  Vector y_p = y_0;

  Vector x = Vector::Random(a_->num_cols());
  a_->RightMultiplyAndAccumulate(x.data(), y_s.data());
  a_->RightMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);

  // The current parallel implementation is expected to be bit-exact.
  EXPECT_EQ((y_s - y_p).norm(), 0.);
}

TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateTest) {
  Vector y_a = Vector::Zero(a_->num_cols());
  Vector y_b = Vector::Zero(a_->num_cols());
  for (int i = 0; i < a_->num_rows(); ++i) {
    Vector x = Vector::Zero(a_->num_rows());
    x[i] = 1.0;
    a_->LeftMultiplyAndAccumulate(x.data(), y_a.data());
    b_->LeftMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateParallelTest) {
  Vector y_0 = Vector::Random(a_->num_cols());
  Vector y_s = y_0;
  Vector y_p = y_0;

  Vector x = Vector::Random(a_->num_rows());
  a_->LeftMultiplyAndAccumulate(x.data(), y_s.data());
  a_->LeftMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);

  // The parallel implementation for left products uses a different order of
  // traversal, thus the results might differ slightly.
  EXPECT_LT((y_s - y_p).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, SquaredColumnNormTest) {
  Vector y_a = Vector::Zero(a_->num_cols());
  Vector y_b = Vector::Zero(a_->num_cols());
  a_->SquaredColumnNorm(y_a.data());
  b_->SquaredColumnNorm(y_b.data());
  EXPECT_LT((y_a - y_b).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, SquaredColumnNormParallelTest) {
  Vector y_a = Vector::Zero(c_->num_cols());
  Vector y_b = Vector::Zero(c_->num_cols());
  c_->SquaredColumnNorm(y_a.data());
  c_->SquaredColumnNorm(y_b.data(), &context_, kNumThreads);
  EXPECT_LT((y_a - y_b).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, ScaleColumnsTest) {
  const Vector scale = Vector::Random(c_->num_cols()).cwiseAbs();

  const Vector x = Vector::Random(c_->num_rows());
  Vector y_expected = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
  y_expected.array() *= scale.array();

  c_->ScaleColumns(scale.data());
  Vector y_observed = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());

  EXPECT_GT(y_expected.norm(), 1.);
  EXPECT_LT((y_observed - y_expected).norm(), 1e-12 * y_expected.norm());
}

TEST_F(BlockSparseMatrixTest, ScaleColumnsParallelTest) {
  const Vector scale = Vector::Random(c_->num_cols()).cwiseAbs();

  const Vector x = Vector::Random(c_->num_rows());
  Vector y_expected = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
  y_expected.array() *= scale.array();

  c_->ScaleColumns(scale.data(), &context_, kNumThreads);
  Vector y_observed = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());

  EXPECT_GT(y_expected.norm(), 1.);
  EXPECT_LT((y_observed - y_expected).norm(), 1e-12 * y_expected.norm());
}

TEST_F(BlockSparseMatrixTest, ToDenseMatrixTest) {
  Matrix m_a;
  Matrix m_b;
  a_->ToDenseMatrix(&m_a);
  b_->ToDenseMatrix(&m_b);
  EXPECT_LT((m_a - m_b).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, AppendRows) {
  std::unique_ptr<LinearLeastSquaresProblem> problem =
      CreateLinearLeastSquaresProblemFromId(2);
  std::unique_ptr<BlockSparseMatrix> m(
      down_cast<BlockSparseMatrix*>(problem->A.release()));
  a_->AppendRows(*m);
  EXPECT_EQ(a_->num_rows(), 2 * m->num_rows());
  EXPECT_EQ(a_->num_cols(), m->num_cols());

  problem = CreateLinearLeastSquaresProblemFromId(1);
  std::unique_ptr<TripletSparseMatrix> m2(
      down_cast<TripletSparseMatrix*>(problem->A.release()));
  b_->AppendRows(*m2);

  Vector y_a = Vector::Zero(a_->num_rows());
  Vector y_b = Vector::Zero(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    y_a.setZero();
    y_b.setZero();
    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST_F(BlockSparseMatrixTest, AppendDeleteRowsTransposedStructure) {
  auto problem = CreateLinearLeastSquaresProblemFromId(2);
  std::unique_ptr<BlockSparseMatrix> m(
      down_cast<BlockSparseMatrix*>(problem->A.release()));

  auto block_structure = a_->block_structure();

  // Several AppendRows and DeleteRowBlocks operations are applied to the
  // matrix, with the regular and transpose block structures being compared
  // after each operation.
  //
  // Non-negative values encode the number of row blocks to remove;
  // -1 encodes appending matrix m.
  const int num_row_blocks_to_delete[] = {0, -1, 1, -1, 8, -1, 10};
  for (auto& t : num_row_blocks_to_delete) {
    if (t == -1) {
      a_->AppendRows(*m);
    } else if (t > 0) {
      CHECK_GE(block_structure->rows.size(), t);
      a_->DeleteRowBlocks(t);
    }

    auto block_structure = a_->block_structure();
    auto transpose_block_structure = a_->transpose_block_structure();
    ASSERT_NE(block_structure, nullptr);
    ASSERT_NE(transpose_block_structure, nullptr);

    EXPECT_EQ(block_structure->rows.size(),
              transpose_block_structure->cols.size());
    EXPECT_EQ(block_structure->cols.size(),
              transpose_block_structure->rows.size());

    std::vector<int> nnz_col(transpose_block_structure->rows.size());
    for (int i = 0; i < block_structure->cols.size(); ++i) {
      EXPECT_EQ(block_structure->cols[i].position,
                transpose_block_structure->rows[i].block.position);
      const int col_size = transpose_block_structure->rows[i].block.size;
      EXPECT_EQ(block_structure->cols[i].size, col_size);

      for (auto& col_cell : transpose_block_structure->rows[i].cells) {
        int matches = 0;
        const int row_block_id = col_cell.block_id;
        nnz_col[i] +=
            col_size * transpose_block_structure->cols[row_block_id].size;
        for (auto& row_cell : block_structure->rows[row_block_id].cells) {
          if (row_cell.block_id != i) continue;
          EXPECT_EQ(row_cell.position, col_cell.position);
          ++matches;
        }
        EXPECT_EQ(matches, 1);
      }

      EXPECT_EQ(nnz_col[i], transpose_block_structure->rows[i].nnz);
      if (i > 0) {
        nnz_col[i] += nnz_col[i - 1];
      }
      EXPECT_EQ(nnz_col[i], transpose_block_structure->rows[i].cumulative_nnz);
    }

    for (int i = 0; i < block_structure->rows.size(); ++i) {
      EXPECT_EQ(block_structure->rows[i].block.position,
                transpose_block_structure->cols[i].position);
      EXPECT_EQ(block_structure->rows[i].block.size,
                transpose_block_structure->cols[i].size);

      for (auto& row_cell : block_structure->rows[i].cells) {
        int matches = 0;
        const int col_block_id = row_cell.block_id;
        for (auto& col_cell :
             transpose_block_structure->rows[col_block_id].cells) {
          if (col_cell.block_id != i) continue;
          EXPECT_EQ(col_cell.position, row_cell.position);
          ++matches;
        }
        EXPECT_EQ(matches, 1);
      }
    }
  }
}
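
// Appends a block diagonal matrix (with diagonal entries 2*i*i + 1) below a_,
// verifies the products against b_ plus the expected diagonal tail, then
// deletes the appended row blocks and verifies that a_ matches b_ again.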
TEST_F(BlockSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
  const std::vector<Block>& column_blocks = a_->block_structure()->cols;
  const int num_cols =
      column_blocks.back().size + column_blocks.back().position;
  Vector diagonal(num_cols);
  for (int i = 0; i < num_cols; ++i) {
    diagonal(i) = 2 * i * i + 1;
  }
  std::unique_ptr<BlockSparseMatrix> appendage(
      BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));

  a_->AppendRows(*appendage);
  Vector y_a, y_b;
  y_a.resize(a_->num_rows());
  y_b.resize(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    y_a.setZero();
    y_b.setZero();
    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a.head(b_->num_rows()) - y_b.head(b_->num_rows())).norm(),
              1e-12);
    Vector expected_tail = Vector::Zero(a_->num_cols());
    expected_tail(i) = diagonal(i);
    EXPECT_LT((y_a.tail(a_->num_cols()) - expected_tail).norm(), 1e-12);
  }

  a_->DeleteRowBlocks(column_blocks.size());
  EXPECT_EQ(a_->num_rows(), b_->num_rows());
  EXPECT_EQ(a_->num_cols(), b_->num_cols());

  y_a.resize(a_->num_rows());
  y_b.resize(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    y_a.setZero();
    y_b.setZero();
    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}
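
// CreateDiagonalMatrix should produce a square block diagonal matrix with the
// requested column block structure; multiplying it by a vector of ones must
// recover the diagonal entries.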
TEST(BlockSparseMatrix, CreateDiagonalMatrix) {
  std::vector<Block> column_blocks;
  column_blocks.emplace_back(2, 0);
  column_blocks.emplace_back(1, 2);
  column_blocks.emplace_back(3, 3);
  const int num_cols =
      column_blocks.back().size + column_blocks.back().position;
  Vector diagonal(num_cols);
  for (int i = 0; i < num_cols; ++i) {
    diagonal(i) = 2 * i * i + 1;
  }

  std::unique_ptr<BlockSparseMatrix> m(
      BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));
  const CompressedRowBlockStructure* bs = m->block_structure();
  EXPECT_EQ(bs->cols.size(), column_blocks.size());
  for (int i = 0; i < column_blocks.size(); ++i) {
    EXPECT_EQ(bs->cols[i].size, column_blocks[i].size);
    EXPECT_EQ(bs->cols[i].position, column_blocks[i].position);
  }
  EXPECT_EQ(m->num_rows(), m->num_cols());
  Vector x = Vector::Ones(num_cols);
  Vector y = Vector::Zero(num_cols);
  m->RightMultiplyAndAccumulate(x.data(), y.data());
  for (int i = 0; i < num_cols; ++i) {
    EXPECT_NEAR(y[i], diagonal[i], std::numeric_limits<double>::epsilon());
  }
}
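
// Checks the dense form of each of the three hard-coded test matrices entry
// by entry.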
TEST(BlockSparseMatrix, ToDenseMatrix) {
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(0);
    Matrix m_dense;
    m->ToDenseMatrix(&m_dense);
    EXPECT_EQ(m_dense.rows(), 4);
    EXPECT_EQ(m_dense.cols(), 6);
    Matrix m_expected(4, 6);
    m_expected << 1, 2, 0, 0, 0, 0,
                  3, 4, 0, 0, 0, 0,
                  0, 0, 5, 6, 7, 0,
                  0, 0, 8, 9, 10, 0;
    EXPECT_EQ(m_dense, m_expected);
  }

  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(1);
    Matrix m_dense;
    m->ToDenseMatrix(&m_dense);
    EXPECT_EQ(m_dense.rows(), 3);
    EXPECT_EQ(m_dense.cols(), 6);
    Matrix m_expected(3, 6);
    m_expected << 1, 2, 0, 5, 6, 0,
                  3, 4, 0, 7, 8, 0,
                  0, 0, 9, 0, 0, 0;
    EXPECT_EQ(m_dense, m_expected);
  }

  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(2);
    Matrix m_dense;
    m->ToDenseMatrix(&m_dense);
    EXPECT_EQ(m_dense.rows(), 3);
    EXPECT_EQ(m_dense.cols(), 6);
    Matrix m_expected(3, 6);
    m_expected << 1, 2, 0, 6, 7, 0,
                  3, 4, 0, 8, 9, 0,
                  0, 0, 5, 0, 0, 10;
    EXPECT_EQ(m_dense, m_expected);
  }
}
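
// Checks the compressed row sparse (CRS) conversion of each test matrix
// against the expected row offsets, column indices, and values.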
TEST(BlockSparseMatrix, ToCRSMatrix) {
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(0);
    auto m_crs = m->ToCompressedRowSparseMatrix();
    std::vector<int> rows_expected = {0, 2, 4, 7, 10};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 3, 4, 2, 3, 4};
    std::vector<double> values_expected = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(1);
    auto m_crs = m->ToCompressedRowSparseMatrix();
    std::vector<int> rows_expected = {0, 4, 8, 9};
    std::vector<int> cols_expected = {0, 1, 3, 4, 0, 1, 3, 4, 2};
    std::vector<double> values_expected = {1, 2, 5, 6, 3, 4, 7, 8, 9};
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(2);
    auto m_crs = m->ToCompressedRowSparseMatrix();
    std::vector<int> rows_expected = {0, 4, 8, 10};
    std::vector<int> cols_expected = {0, 1, 3, 4, 0, 1, 3, 4, 2, 5};
    std::vector<double> values_expected = {1, 2, 6, 7, 3, 4, 8, 9, 5, 10};
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs->values()[i], values_expected[i]);
    }
  }
}
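
// Same checks as above, but for the CRS representation of the transpose of
// each test matrix.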
TEST(BlockSparseMatrix, ToCRSMatrixTranspose) {
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(0);
    auto m_crs_transpose = m->ToCompressedRowSparseMatrixTranspose();
    std::vector<int> rows_expected = {0, 2, 4, 6, 8, 10, 10};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 3, 2, 3, 2, 3};
    std::vector<double> values_expected = {1, 3, 2, 4, 5, 8, 6, 9, 7, 10};
    EXPECT_EQ(m_crs_transpose->num_nonzeros(), cols_expected.size());
    EXPECT_EQ(m_crs_transpose->num_rows(), rows_expected.size() - 1);
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(1);
    auto m_crs_transpose = m->ToCompressedRowSparseMatrixTranspose();
    std::vector<int> rows_expected = {0, 2, 4, 5, 7, 9, 9};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 0, 1, 0, 1};
    std::vector<double> values_expected = {1, 3, 2, 4, 9, 5, 7, 6, 8};
    EXPECT_EQ(m_crs_transpose->num_nonzeros(), cols_expected.size());
    EXPECT_EQ(m_crs_transpose->num_rows(), rows_expected.size() - 1);
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(2);
    auto m_crs_transpose = m->ToCompressedRowSparseMatrixTranspose();
    std::vector<int> rows_expected = {0, 2, 4, 5, 7, 9, 10};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 0, 1, 0, 1, 2};
    std::vector<double> values_expected = {1, 3, 2, 4, 5, 6, 8, 7, 9, 10};
    EXPECT_EQ(m_crs_transpose->num_nonzeros(), cols_expected.size());
    EXPECT_EQ(m_crs_transpose->num_rows(), rows_expected.size() - 1);
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->values()[i], values_expected[i]);
    }
  }
}
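
// Copies the block structure and values of a random matrix into a second
// BlockSparseMatrix and verifies that right and left products of the copy
// match the original to machine precision.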
TEST(BlockSparseMatrix, CreateTranspose) {
  constexpr int kNumTrials = 10;
  BlockSparseMatrix::RandomMatrixOptions options;
  options.num_col_blocks = 10;
  options.min_col_block_size = 1;
  options.max_col_block_size = 3;

  options.num_row_blocks = 20;
  options.min_row_block_size = 1;
  options.max_row_block_size = 4;
  options.block_density = 0.25;
  std::mt19937 prng;

  for (int trial = 0; trial < kNumTrials; ++trial) {
    auto a = BlockSparseMatrix::CreateRandomMatrix(options, prng);

    auto ap_bs = std::make_unique<CompressedRowBlockStructure>();
    *ap_bs = *a->block_structure();
    BlockSparseMatrix ap(ap_bs.release());
    std::copy_n(a->values(), a->num_nonzeros(), ap.mutable_values());

    Vector x = Vector::Random(a->num_cols());
    Vector y = Vector::Random(a->num_rows());
    Vector a_x = Vector::Zero(a->num_rows());
    Vector a_t_y = Vector::Zero(a->num_cols());
    Vector ap_x = Vector::Zero(a->num_rows());
    Vector ap_t_y = Vector::Zero(a->num_cols());

    a->RightMultiplyAndAccumulate(x.data(), a_x.data());
    ap.RightMultiplyAndAccumulate(x.data(), ap_x.data());
    EXPECT_NEAR((a_x - ap_x).norm() / a_x.norm(),
                0.0,
                std::numeric_limits<double>::epsilon());

    a->LeftMultiplyAndAccumulate(y.data(), a_t_y.data());
    ap.LeftMultiplyAndAccumulate(y.data(), ap_t_y.data());
    EXPECT_NEAR((a_t_y - ap_t_y).norm() / a_t_y.norm(),
                0.0,
                std::numeric_limits<double>::epsilon());
  }
}

}  // namespace internal
}  // namespace ceres