// cuda_buffer.h
  1. // Ceres Solver - A fast non-linear least squares minimizer
  2. // Copyright 2023 Google Inc. All rights reserved.
  3. // http://ceres-solver.org/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are met:
  7. //
  8. // * Redistributions of source code must retain the above copyright notice,
  9. // this list of conditions and the following disclaimer.
  10. // * Redistributions in binary form must reproduce the above copyright notice,
  11. // this list of conditions and the following disclaimer in the documentation
  12. // and/or other materials provided with the distribution.
  13. // * Neither the name of Google Inc. nor the names of its contributors may be
  14. // used to endorse or promote products derived from this software without
  15. // specific prior written permission.
  16. //
  17. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  18. // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  19. // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  20. // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  21. // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  22. // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  23. // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  24. // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  25. // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  26. // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  27. // POSSIBILITY OF SUCH DAMAGE.
  28. //
  29. // Author: joydeepb@cs.utexas.edu (Joydeep Biswas)
  30. #ifndef CERES_INTERNAL_CUDA_BUFFER_H_
  31. #define CERES_INTERNAL_CUDA_BUFFER_H_
  32. #include "ceres/context_impl.h"
  33. #include "ceres/internal/config.h"
  34. #ifndef CERES_NO_CUDA
  35. #include <vector>
  36. #include "cuda_runtime.h"
  37. #include "glog/logging.h"
  38. namespace ceres::internal {
  39. // An encapsulated buffer to maintain GPU memory, and handle transfers between
  40. // GPU and system memory. It is the responsibility of the user to ensure that
  41. // the appropriate GPU device is selected before each subroutine is called. This
  42. // is particularly important when using multiple GPU devices on different CPU
  43. // threads, since active Cuda devices are determined by the cuda runtime on a
  44. // per-thread basis.
  45. template <typename T>
  46. class CudaBuffer {
  47. public:
  48. explicit CudaBuffer(ContextImpl* context) : context_(context) {}
  49. CudaBuffer(ContextImpl* context, int size) : context_(context) {
  50. Reserve(size);
  51. }
  52. CudaBuffer(CudaBuffer&& other)
  53. : data_(other.data_), size_(other.size_), context_(other.context_) {
  54. other.data_ = nullptr;
  55. other.size_ = 0;
  56. }
  57. CudaBuffer(const CudaBuffer&) = delete;
  58. CudaBuffer& operator=(const CudaBuffer&) = delete;
  59. ~CudaBuffer() {
  60. if (data_ != nullptr) {
  61. CHECK_EQ(cudaFree(data_), cudaSuccess);
  62. }
  63. }
  64. // Grow the GPU memory buffer if needed to accommodate data of the specified
  65. // size
  66. void Reserve(const size_t size) {
  67. if (size > size_) {
  68. if (data_ != nullptr) {
  69. CHECK_EQ(cudaFree(data_), cudaSuccess);
  70. }
  71. CHECK_EQ(cudaMalloc(&data_, size * sizeof(T)), cudaSuccess)
  72. << "Failed to allocate " << size * sizeof(T)
  73. << " bytes of GPU memory";
  74. size_ = size;
  75. }
  76. }
  77. // Perform an asynchronous copy from CPU memory to GPU memory managed by this
  78. // CudaBuffer instance using the stream provided.
  79. void CopyFromCpu(const T* data, const size_t size) {
  80. Reserve(size);
  81. CHECK_EQ(cudaMemcpyAsync(data_,
  82. data,
  83. size * sizeof(T),
  84. cudaMemcpyHostToDevice,
  85. context_->DefaultStream()),
  86. cudaSuccess);
  87. }
  88. // Perform an asynchronous copy from a vector in CPU memory to GPU memory
  89. // managed by this CudaBuffer instance.
  90. void CopyFromCpuVector(const std::vector<T>& data) {
  91. Reserve(data.size());
  92. CHECK_EQ(cudaMemcpyAsync(data_,
  93. data.data(),
  94. data.size() * sizeof(T),
  95. cudaMemcpyHostToDevice,
  96. context_->DefaultStream()),
  97. cudaSuccess);
  98. }
  99. // Perform an asynchronous copy from another GPU memory array to the GPU
  100. // memory managed by this CudaBuffer instance using the stream provided.
  101. void CopyFromGPUArray(const T* data, const size_t size) {
  102. Reserve(size);
  103. CHECK_EQ(cudaMemcpyAsync(data_,
  104. data,
  105. size * sizeof(T),
  106. cudaMemcpyDeviceToDevice,
  107. context_->DefaultStream()),
  108. cudaSuccess);
  109. }
  110. // Copy data from the GPU memory managed by this CudaBuffer instance to CPU
  111. // memory. It is the caller's responsibility to ensure that the CPU memory
  112. // pointer is valid, i.e. it is not null, and that it points to memory of
  113. // at least this->size() size. This method ensures all previously dispatched
  114. // GPU operations on the specified stream have completed before copying the
  115. // data to CPU memory.
  116. void CopyToCpu(T* data, const size_t size) const {
  117. CHECK(data_ != nullptr);
  118. CHECK_EQ(cudaMemcpyAsync(data,
  119. data_,
  120. size * sizeof(T),
  121. cudaMemcpyDeviceToHost,
  122. context_->DefaultStream()),
  123. cudaSuccess);
  124. CHECK_EQ(cudaStreamSynchronize(context_->DefaultStream()), cudaSuccess);
  125. }
  126. // Copy N items from another GPU memory array to the GPU memory managed by
  127. // this CudaBuffer instance, growing this buffer's size if needed. This copy
  128. // is asynchronous, and operates on the stream provided.
  129. void CopyNItemsFrom(int n, const CudaBuffer<T>& other) {
  130. Reserve(n);
  131. CHECK(other.data_ != nullptr);
  132. CHECK(data_ != nullptr);
  133. CHECK_EQ(cudaMemcpyAsync(data_,
  134. other.data_,
  135. size_ * sizeof(T),
  136. cudaMemcpyDeviceToDevice,
  137. context_->DefaultStream()),
  138. cudaSuccess);
  139. }
  140. // Return a pointer to the GPU memory managed by this CudaBuffer instance.
  141. T* data() { return data_; }
  142. const T* data() const { return data_; }
  143. // Return the number of items of type T that can fit in the GPU memory
  144. // allocated so far by this CudaBuffer instance.
  145. size_t size() const { return size_; }
  146. private:
  147. T* data_ = nullptr;
  148. size_t size_ = 0;
  149. ContextImpl* context_ = nullptr;
  150. };
  151. } // namespace ceres::internal
  152. #endif // CERES_NO_CUDA
  153. #endif // CERES_INTERNAL_CUDA_BUFFER_H_