// cuda_streamed_buffer_test.cc
  1. // Ceres Solver - A fast non-linear least squares minimizer
  2. // Copyright 2023 Google Inc. All rights reserved.
  3. // http://ceres-solver.org/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are met:
  7. //
  8. // * Redistributions of source code must retain the above copyright notice,
  9. // this list of conditions and the following disclaimer.
  10. // * Redistributions in binary form must reproduce the above copyright notice,
  11. // this list of conditions and the following disclaimer in the documentation
  12. // and/or other materials provided with the distribution.
  13. // * Neither the name of Google Inc. nor the names of its contributors may be
  14. // used to endorse or promote products derived from this software without
  15. // specific prior written permission.
  16. //
  17. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  18. // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  19. // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  20. // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  21. // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  22. // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  23. // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  24. // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  25. // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  26. // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  27. // POSSIBILITY OF SUCH DAMAGE.
  28. //
  29. // Authors: dmitriy.korchemkin@gmail.com (Dmitriy Korchemkin)
#include "ceres/internal/config.h"
#ifndef CERES_NO_CUDA
#include <glog/logging.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "ceres/cuda_streamed_buffer.h"
  36. namespace ceres::internal {
  37. TEST(CudaStreamedBufferTest, IntegerCopy) {
  38. // Offsets and sizes of batches supplied to callback
  39. std::vector<std::pair<int, int>> batches;
  40. const int kMaxTemporaryArraySize = 16;
  41. const int kInputSize = kMaxTemporaryArraySize * 7 + 3;
  42. ContextImpl context;
  43. std::string message;
  44. CHECK(context.InitCuda(&message)) << "InitCuda() failed because: " << message;
  45. std::vector<int> inputs(kInputSize);
  46. std::vector<int> outputs(kInputSize, -1);
  47. std::iota(inputs.begin(), inputs.end(), 0);
  48. CudaStreamedBuffer<int> streamed_buffer(&context, kMaxTemporaryArraySize);
  49. streamed_buffer.CopyToGpu(inputs.data(),
  50. kInputSize,
  51. [&outputs, &batches](const int* device_pointer,
  52. int size,
  53. int offset,
  54. cudaStream_t stream) {
  55. batches.emplace_back(offset, size);
  56. CHECK_EQ(cudaSuccess,
  57. cudaMemcpyAsync(outputs.data() + offset,
  58. device_pointer,
  59. sizeof(int) * size,
  60. cudaMemcpyDeviceToHost,
  61. stream));
  62. });
  63. // All operations in all streams should be completed when CopyToGpu returns
  64. // control to the callee
  65. for (int i = 0; i < ContextImpl::kNumCudaStreams; ++i) {
  66. CHECK_EQ(cudaSuccess, cudaStreamQuery(context.streams_[i]));
  67. }
  68. // Check if every element was visited
  69. for (int i = 0; i < kInputSize; ++i) {
  70. CHECK_EQ(outputs[i], i);
  71. }
  72. // Check if there is no overlap between batches
  73. std::sort(batches.begin(), batches.end());
  74. const int num_batches = batches.size();
  75. for (int i = 0; i < num_batches; ++i) {
  76. const auto [begin, size] = batches[i];
  77. const int end = begin + size;
  78. CHECK_GE(begin, 0);
  79. CHECK_LT(begin, kInputSize);
  80. CHECK_GT(size, 0);
  81. CHECK_LE(end, kInputSize);
  82. if (i + 1 == num_batches) continue;
  83. CHECK_EQ(end, batches[i + 1].first);
  84. }
  85. }
  86. TEST(CudaStreamedBufferTest, IntegerNoCopy) {
  87. // Offsets and sizes of batches supplied to callback
  88. std::vector<std::pair<int, int>> batches;
  89. const int kMaxTemporaryArraySize = 16;
  90. const int kInputSize = kMaxTemporaryArraySize * 7 + 3;
  91. ContextImpl context;
  92. std::string message;
  93. CHECK(context.InitCuda(&message)) << "InitCuda() failed because: " << message;
  94. int* inputs;
  95. int* outputs;
  96. CHECK_EQ(cudaSuccess,
  97. cudaHostAlloc(
  98. &inputs, sizeof(int) * kInputSize, cudaHostAllocWriteCombined));
  99. CHECK_EQ(
  100. cudaSuccess,
  101. cudaHostAlloc(&outputs, sizeof(int) * kInputSize, cudaHostAllocDefault));
  102. std::fill(outputs, outputs + kInputSize, -1);
  103. std::iota(inputs, inputs + kInputSize, 0);
  104. CudaStreamedBuffer<int> streamed_buffer(&context, kMaxTemporaryArraySize);
  105. streamed_buffer.CopyToGpu(inputs,
  106. kInputSize,
  107. [outputs, &batches](const int* device_pointer,
  108. int size,
  109. int offset,
  110. cudaStream_t stream) {
  111. batches.emplace_back(offset, size);
  112. CHECK_EQ(cudaSuccess,
  113. cudaMemcpyAsync(outputs + offset,
  114. device_pointer,
  115. sizeof(int) * size,
  116. cudaMemcpyDeviceToHost,
  117. stream));
  118. });
  119. // All operations in all streams should be completed when CopyToGpu returns
  120. // control to the callee
  121. for (int i = 0; i < ContextImpl::kNumCudaStreams; ++i) {
  122. CHECK_EQ(cudaSuccess, cudaStreamQuery(context.streams_[i]));
  123. }
  124. // Check if every element was visited
  125. for (int i = 0; i < kInputSize; ++i) {
  126. CHECK_EQ(outputs[i], i);
  127. }
  128. // Check if there is no overlap between batches
  129. std::sort(batches.begin(), batches.end());
  130. const int num_batches = batches.size();
  131. for (int i = 0; i < num_batches; ++i) {
  132. const auto [begin, size] = batches[i];
  133. const int end = begin + size;
  134. CHECK_GE(begin, 0);
  135. CHECK_LT(begin, kInputSize);
  136. CHECK_GT(size, 0);
  137. CHECK_LE(end, kInputSize);
  138. if (i + 1 == num_batches) continue;
  139. CHECK_EQ(end, batches[i + 1].first);
  140. }
  141. CHECK_EQ(cudaSuccess, cudaFreeHost(inputs));
  142. CHECK_EQ(cudaSuccess, cudaFreeHost(outputs));
  143. }
  144. } // namespace ceres::internal
  145. #endif // CERES_NO_CUDA