parallel_for_test.cc
  1. // Ceres Solver - A fast non-linear least squares minimizer
  2. // Copyright 2023 Google Inc. All rights reserved.
  3. // http://ceres-solver.org/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are met:
  7. //
  8. // * Redistributions of source code must retain the above copyright notice,
  9. // this list of conditions and the following disclaimer.
  10. // * Redistributions in binary form must reproduce the above copyright notice,
  11. // this list of conditions and the following disclaimer in the documentation
  12. // and/or other materials provided with the distribution.
  13. // * Neither the name of Google Inc. nor the names of its contributors may be
  14. // used to endorse or promote products derived from this software without
  15. // specific prior written permission.
  16. //
  17. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  18. // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  19. // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  20. // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  21. // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  22. // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  23. // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  24. // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  25. // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  26. // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  27. // POSSIBILITY OF SUCH DAMAGE.
  28. //
  29. // Author: vitus@google.com (Michael Vitus)
  30. #include "ceres/parallel_for.h"
  31. #include <atomic>
  32. #include <cmath>
  33. #include <condition_variable>
  34. #include <mutex>
  35. #include <numeric>
  36. #include <random>
  37. #include <thread>
  38. #include <tuple>
  39. #include <vector>
  40. #include "ceres/context_impl.h"
  41. #include "ceres/internal/config.h"
  42. #include "ceres/parallel_vector_ops.h"
  43. #include "glog/logging.h"
  44. #include "gmock/gmock.h"
  45. #include "gtest/gtest.h"
  46. namespace ceres::internal {
  47. using testing::ElementsAreArray;
  48. using testing::UnorderedElementsAreArray;
  49. // Tests the parallel for loop computes the correct result for various number of
  50. // threads.
  51. TEST(ParallelFor, NumThreads) {
  52. ContextImpl context;
  53. context.EnsureMinimumThreads(/*num_threads=*/2);
  54. const int size = 16;
  55. std::vector<int> expected_results(size, 0);
  56. for (int i = 0; i < size; ++i) {
  57. expected_results[i] = std::sqrt(i);
  58. }
  59. for (int num_threads = 1; num_threads <= 8; ++num_threads) {
  60. std::vector<int> values(size, 0);
  61. ParallelFor(&context, 0, size, num_threads, [&values](int i) {
  62. values[i] = std::sqrt(i);
  63. });
  64. EXPECT_THAT(values, ElementsAreArray(expected_results));
  65. }
  66. }
  67. // Tests parallel for loop with ranges
  68. TEST(ParallelForWithRange, NumThreads) {
  69. ContextImpl context;
  70. context.EnsureMinimumThreads(/*num_threads=*/2);
  71. const int size = 16;
  72. std::vector<int> expected_results(size, 0);
  73. for (int i = 0; i < size; ++i) {
  74. expected_results[i] = std::sqrt(i);
  75. }
  76. for (int num_threads = 1; num_threads <= 8; ++num_threads) {
  77. std::vector<int> values(size, 0);
  78. ParallelFor(
  79. &context, 0, size, num_threads, [&values](std::tuple<int, int> range) {
  80. auto [start, end] = range;
  81. for (int i = start; i < end; ++i) values[i] = std::sqrt(i);
  82. });
  83. EXPECT_THAT(values, ElementsAreArray(expected_results));
  84. }
  85. }
  86. // Tests parallel for loop with ranges and lower bound on minimal range size
  87. TEST(ParallelForWithRange, MinimalSize) {
  88. ContextImpl context;
  89. constexpr int kNumThreads = 4;
  90. constexpr int kMinBlockSize = 5;
  91. context.EnsureMinimumThreads(kNumThreads);
  92. for (int size = kMinBlockSize; size <= 25; ++size) {
  93. std::atomic<bool> failed(false);
  94. ParallelFor(
  95. &context,
  96. 0,
  97. size,
  98. kNumThreads,
  99. [&failed, kMinBlockSize](std::tuple<int, int> range) {
  100. auto [start, end] = range;
  101. if (end - start < kMinBlockSize) failed = true;
  102. },
  103. kMinBlockSize);
  104. EXPECT_EQ(failed, false);
  105. }
  106. }
  107. // Tests the parallel for loop with the thread ID interface computes the correct
  108. // result for various number of threads.
  109. TEST(ParallelForWithThreadId, NumThreads) {
  110. ContextImpl context;
  111. context.EnsureMinimumThreads(/*num_threads=*/2);
  112. const int size = 16;
  113. std::vector<int> expected_results(size, 0);
  114. for (int i = 0; i < size; ++i) {
  115. expected_results[i] = std::sqrt(i);
  116. }
  117. for (int num_threads = 1; num_threads <= 8; ++num_threads) {
  118. std::vector<int> values(size, 0);
  119. ParallelFor(
  120. &context, 0, size, num_threads, [&values](int thread_id, int i) {
  121. values[i] = std::sqrt(i);
  122. });
  123. EXPECT_THAT(values, ElementsAreArray(expected_results));
  124. }
  125. }
  126. // Tests nested for loops do not result in a deadlock.
  127. TEST(ParallelFor, NestedParallelForDeadlock) {
  128. ContextImpl context;
  129. context.EnsureMinimumThreads(/*num_threads=*/2);
  130. // Increment each element in the 2D matrix.
  131. std::vector<std::vector<int>> x(3, {1, 2, 3});
  132. ParallelFor(&context, 0, 3, 2, [&x, &context](int i) {
  133. std::vector<int>& y = x.at(i);
  134. ParallelFor(&context, 0, 3, 2, [&y](int j) { ++y.at(j); });
  135. });
  136. const std::vector<int> results = {2, 3, 4};
  137. for (const std::vector<int>& value : x) {
  138. EXPECT_THAT(value, ElementsAreArray(results));
  139. }
  140. }
  141. // Tests nested for loops do not result in a deadlock for the parallel for with
  142. // thread ID interface.
  143. TEST(ParallelForWithThreadId, NestedParallelForDeadlock) {
  144. ContextImpl context;
  145. context.EnsureMinimumThreads(/*num_threads=*/2);
  146. // Increment each element in the 2D matrix.
  147. std::vector<std::vector<int>> x(3, {1, 2, 3});
  148. ParallelFor(&context, 0, 3, 2, [&x, &context](int thread_id, int i) {
  149. std::vector<int>& y = x.at(i);
  150. ParallelFor(&context, 0, 3, 2, [&y](int thread_id, int j) { ++y.at(j); });
  151. });
  152. const std::vector<int> results = {2, 3, 4};
  153. for (const std::vector<int>& value : x) {
  154. EXPECT_THAT(value, ElementsAreArray(results));
  155. }
  156. }
// Verifies that two concurrently-running loop iterations observe two distinct
// thread ids. A condition-variable rendezvous forces both iterations to be
// in flight at the same time, so they cannot be served by a single thread.
TEST(ParallelForWithThreadId, UniqueThreadIds) {
  // Ensure the hardware supports more than 1 thread to ensure the test will
  // pass.
  const int num_hardware_threads = std::thread::hardware_concurrency();
  if (num_hardware_threads <= 1) {
    LOG(ERROR)
        << "Test not supported, the hardware does not support threading.";
    return;
  }
  ContextImpl context;
  context.EnsureMinimumThreads(/*num_threads=*/2);
  // Each of the two iterations records the id of the thread that ran it.
  std::vector<int> x(2, -1);
  std::mutex mutex;
  std::condition_variable condition;
  // Number of iterations that have recorded their thread id so far.
  int count = 0;
  ParallelFor(&context,
              0,
              2,
              2,
              [&x, &mutex, &condition, &count](int thread_id, int i) {
                std::unique_lock<std::mutex> lock(mutex);
                x[i] = thread_id;
                ++count;
                condition.notify_all();
                // Block until both iterations have run; this forces the two
                // iterations onto two different threads.
                condition.wait(lock, [&]() { return count == 2; });
              });
  // Two concurrent iterations must have been given two distinct ids.
  EXPECT_THAT(x, UnorderedElementsAreArray({0, 1}));
}
// Helper function for partition tests: exhaustive reference implementation
// (defined below, after the tests that use it) that reports whether
// [start, end) can be split into at most max_partitions contiguous segments,
// each with a cost sum not exceeding max_cost.
bool BruteForcePartition(
    int* costs, int start, int end, int max_partitions, int max_cost);
  189. // Basic test if MaxPartitionCostIsFeasible and BruteForcePartition agree on
  190. // simple test-cases
  191. TEST(GuidedParallelFor, MaxPartitionCostIsFeasible) {
  192. std::vector<int> costs, cumulative_costs, partition;
  193. costs = {1, 2, 3, 5, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0};
  194. cumulative_costs.resize(costs.size());
  195. std::partial_sum(costs.begin(), costs.end(), cumulative_costs.begin());
  196. const auto dummy_getter = [](const int v) { return v; };
  197. // [1, 2, 3] [5], [0 ... 0, 7, 0, ... 0]
  198. EXPECT_TRUE(MaxPartitionCostIsFeasible(0,
  199. costs.size(),
  200. 3,
  201. 7,
  202. 0,
  203. cumulative_costs.data(),
  204. dummy_getter,
  205. &partition));
  206. EXPECT_TRUE(BruteForcePartition(costs.data(), 0, costs.size(), 3, 7));
  207. // [1, 2, 3, 5, 0 ... 0, 7, 0, ... 0]
  208. EXPECT_TRUE(MaxPartitionCostIsFeasible(0,
  209. costs.size(),
  210. 3,
  211. 18,
  212. 0,
  213. cumulative_costs.data(),
  214. dummy_getter,
  215. &partition));
  216. EXPECT_TRUE(BruteForcePartition(costs.data(), 0, costs.size(), 3, 18));
  217. // Impossible since there is item of cost 7
  218. EXPECT_FALSE(MaxPartitionCostIsFeasible(0,
  219. costs.size(),
  220. 3,
  221. 6,
  222. 0,
  223. cumulative_costs.data(),
  224. dummy_getter,
  225. &partition));
  226. EXPECT_FALSE(BruteForcePartition(costs.data(), 0, costs.size(), 3, 6));
  227. // Impossible
  228. EXPECT_FALSE(MaxPartitionCostIsFeasible(0,
  229. costs.size(),
  230. 2,
  231. 10,
  232. 0,
  233. cumulative_costs.data(),
  234. dummy_getter,
  235. &partition));
  236. EXPECT_FALSE(BruteForcePartition(costs.data(), 0, costs.size(), 2, 10));
  237. }
  238. // Randomized tests for MaxPartitionCostIsFeasible
  239. TEST(GuidedParallelFor, MaxPartitionCostIsFeasibleRandomized) {
  240. std::vector<int> costs, cumulative_costs, partition;
  241. const auto dummy_getter = [](const int v) { return v; };
  242. // Random tests
  243. const int kNumTests = 1000;
  244. const int kMaxElements = 32;
  245. const int kMaxPartitions = 16;
  246. const int kMaxElCost = 8;
  247. std::mt19937 rng;
  248. std::uniform_int_distribution<int> rng_N(1, kMaxElements);
  249. std::uniform_int_distribution<int> rng_M(1, kMaxPartitions);
  250. std::uniform_int_distribution<int> rng_e(0, kMaxElCost);
  251. for (int t = 0; t < kNumTests; ++t) {
  252. const int N = rng_N(rng);
  253. const int M = rng_M(rng);
  254. int total = 0;
  255. costs.clear();
  256. for (int i = 0; i < N; ++i) {
  257. costs.push_back(rng_e(rng));
  258. total += costs.back();
  259. }
  260. cumulative_costs.resize(N);
  261. std::partial_sum(costs.begin(), costs.end(), cumulative_costs.begin());
  262. std::uniform_int_distribution<int> rng_seg(0, N - 1);
  263. int start = rng_seg(rng);
  264. int end = rng_seg(rng);
  265. if (start > end) std::swap(start, end);
  266. ++end;
  267. int first_admissible = 0;
  268. for (int threshold = 1; threshold <= total; ++threshold) {
  269. const bool bruteforce =
  270. BruteForcePartition(costs.data(), start, end, M, threshold);
  271. if (bruteforce && !first_admissible) {
  272. first_admissible = threshold;
  273. }
  274. const bool binary_search =
  275. MaxPartitionCostIsFeasible(start,
  276. end,
  277. M,
  278. threshold,
  279. start ? cumulative_costs[start - 1] : 0,
  280. cumulative_costs.data(),
  281. dummy_getter,
  282. &partition);
  283. EXPECT_EQ(bruteforce, binary_search);
  284. EXPECT_LE(partition.size(), M + 1);
  285. // check partition itself
  286. if (binary_search) {
  287. ASSERT_GT(partition.size(), 1);
  288. EXPECT_EQ(partition.front(), start);
  289. EXPECT_EQ(partition.back(), end);
  290. const int num_partitions = partition.size() - 1;
  291. EXPECT_LE(num_partitions, M);
  292. for (int j = 0; j < num_partitions; ++j) {
  293. int total = 0;
  294. for (int k = partition[j]; k < partition[j + 1]; ++k) {
  295. EXPECT_LT(k, end);
  296. EXPECT_GE(k, start);
  297. total += costs[k];
  298. }
  299. EXPECT_LE(total, threshold);
  300. }
  301. }
  302. }
  303. }
  304. }
// Randomized test for PartitionRangeForParallelFor: for random cost vectors
// and sub-ranges, the returned partition must cover exactly [start, end) with
// at most M segments, and every segment cost must be bounded by the smallest
// threshold for which the brute-force reference finds a feasible partition
// (i.e. the partition must be optimal in the max-segment-cost sense).
TEST(GuidedParallelFor, PartitionRangeForParallelFor) {
  std::vector<int> costs, cumulative_costs, partition;
  const auto dummy_getter = [](const int v) { return v; };
  // Random tests
  const int kNumTests = 1000;
  const int kMaxElements = 32;
  const int kMaxPartitions = 16;
  const int kMaxElCost = 8;
  std::mt19937 rng;
  std::uniform_int_distribution<int> rng_N(1, kMaxElements);
  std::uniform_int_distribution<int> rng_M(1, kMaxPartitions);
  std::uniform_int_distribution<int> rng_e(0, kMaxElCost);
  for (int t = 0; t < kNumTests; ++t) {
    // Draw a random cost vector of N elements and a random partition limit M.
    const int N = rng_N(rng);
    const int M = rng_M(rng);
    int total = 0;
    costs.clear();
    for (int i = 0; i < N; ++i) {
      costs.push_back(rng_e(rng));
      total += costs.back();
    }
    cumulative_costs.resize(N);
    std::partial_sum(costs.begin(), costs.end(), cumulative_costs.begin());
    // Pick a random non-empty sub-range [start, end).
    std::uniform_int_distribution<int> rng_seg(0, N - 1);
    int start = rng_seg(rng);
    int end = rng_seg(rng);
    if (start > end) std::swap(start, end);
    ++end;
    // Find the smallest threshold for which a feasible partition exists,
    // by linear scan with the brute-force reference.
    int first_admissible = 0;
    for (int threshold = 1; threshold <= total; ++threshold) {
      const bool bruteforce =
          BruteForcePartition(costs.data(), start, end, M, threshold);
      if (bruteforce) {
        first_admissible = threshold;
        break;
      }
    }
    // A threshold must exist unless all costs are zero.
    EXPECT_TRUE(first_admissible != 0 || total == 0);
    partition = PartitionRangeForParallelFor(
        start, end, M, cumulative_costs.data(), dummy_getter);
    ASSERT_GT(partition.size(), 1);
    EXPECT_EQ(partition.front(), start);
    EXPECT_EQ(partition.back(), end);
    const int num_partitions = partition.size() - 1;
    EXPECT_LE(num_partitions, M);
    for (int j = 0; j < num_partitions; ++j) {
      int total = 0;
      for (int k = partition[j]; k < partition[j + 1]; ++k) {
        EXPECT_LT(k, end);
        EXPECT_GE(k, start);
        total += costs[k];
      }
      // Each segment's cost must not exceed the optimal threshold.
      EXPECT_LE(total, first_admissible);
    }
  }
}
  361. // Recursively try to partition range into segements of total cost
  362. // less than max_cost
  363. bool BruteForcePartition(
  364. int* costs, int start, int end, int max_partitions, int max_cost) {
  365. if (start == end) return true;
  366. if (start < end && max_partitions == 0) return false;
  367. int total_cost = 0;
  368. for (int last_curr = start + 1; last_curr <= end; ++last_curr) {
  369. total_cost += costs[last_curr - 1];
  370. if (total_cost > max_cost) break;
  371. if (BruteForcePartition(
  372. costs, last_curr, end, max_partitions - 1, max_cost))
  373. return true;
  374. }
  375. return false;
  376. }
  377. // Tests if guided parallel for loop computes the correct result for various
  378. // number of threads.
  379. TEST(GuidedParallelFor, NumThreads) {
  380. ContextImpl context;
  381. context.EnsureMinimumThreads(/*num_threads=*/2);
  382. const int size = 16;
  383. std::vector<int> expected_results(size, 0);
  384. for (int i = 0; i < size; ++i) {
  385. expected_results[i] = std::sqrt(i);
  386. }
  387. std::vector<int> costs, cumulative_costs;
  388. for (int i = 1; i <= size; ++i) {
  389. int cost = i * i;
  390. costs.push_back(cost);
  391. if (i == 1) {
  392. cumulative_costs.push_back(cost);
  393. } else {
  394. cumulative_costs.push_back(cost + cumulative_costs.back());
  395. }
  396. }
  397. for (int num_threads = 1; num_threads <= 8; ++num_threads) {
  398. std::vector<int> values(size, 0);
  399. ParallelFor(
  400. &context,
  401. 0,
  402. size,
  403. num_threads,
  404. [&values](int i) { values[i] = std::sqrt(i); },
  405. cumulative_costs.data(),
  406. [](const int v) { return v; });
  407. EXPECT_THAT(values, ElementsAreArray(expected_results));
  408. }
  409. }
// Verifies that ParallelAssign evaluates the Eigen expression D^2 * x into
// the output vector with near-identical results to serial Eigen evaluation,
// for 1 to 8 threads.
TEST(ParallelAssign, D2MulX) {
  const int kVectorSize = 1024 * 1024;
  const int kMaxNumThreads = 8;
  const double kEpsilon = 1e-16;
  // D is a view into the second half of a larger random vector
  // (presumably to exercise ConstVectorRef over non-owning, offset storage
  // — TODO confirm).
  const Vector D_full = Vector::Random(kVectorSize * 2);
  const ConstVectorRef D(D_full.data() + kVectorSize, kVectorSize);
  const Vector x = Vector::Random(kVectorSize);
  // Serial reference evaluation of the same expression.
  const Vector y_expected = D.array().square() * x.array();
  ContextImpl context;
  context.EnsureMinimumThreads(kMaxNumThreads);
  for (int num_threads = 1; num_threads <= kMaxNumThreads; ++num_threads) {
    Vector y_observed(kVectorSize);
    ParallelAssign(
        &context, num_threads, y_observed, D.array().square() * x.array());
    // We might get non-bit-exact result due to different precision in scalar
    // and vector code. For example, in x86 mode mingw might emit x87
    // instructions for scalar code, thus making bit-exact check fail
    EXPECT_NEAR((y_expected - y_observed).squaredNorm(),
                0.,
                kEpsilon * y_expected.squaredNorm());
  }
}
  432. TEST(ParallelAssign, SetZero) {
  433. const int kVectorSize = 1024 * 1024;
  434. const int kMaxNumThreads = 8;
  435. ContextImpl context;
  436. context.EnsureMinimumThreads(kMaxNumThreads);
  437. for (int num_threads = 1; num_threads <= kMaxNumThreads; ++num_threads) {
  438. Vector x = Vector::Random(kVectorSize);
  439. ParallelSetZero(&context, num_threads, x);
  440. CHECK_EQ(x.squaredNorm(), 0.);
  441. }
  442. }
  443. } // namespace ceres::internal